HDFS-4798. Update computeContentSummary() for the reference nodes in snapshots.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1479671 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-05-06 18:48:24 +00:00
parent 72d783374c
commit 424b270c94
13 changed files with 70 additions and 96 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt

@@ -335,3 +335,6 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4791. Update and fix deletion of reference inode.  (Jing Zhao via
   szetszwo)
+
+  HDFS-4798. Update computeContentSummary() for the reference nodes in
+  snapshots.  (szetszwo)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Content.java

@@ -42,16 +42,20 @@ public enum Content {
   /** Content counts. */
   public static class Counts extends EnumCounters<Content> {
+    public static Counts newInstance() {
+      return new Counts();
+    }
+
     private Counts() {
       super(Content.values());
     }
   }
 
-  static final EnumCounters.Factory<Content, Counts> FACTORY
+  private static final EnumCounters.Factory<Content, Counts> FACTORY
       = new EnumCounters.Factory<Content, Counts>() {
     @Override
     public Counts newInstance() {
-      return new Counts();
+      return Counts.newInstance();
     }
   };
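
The hunk above funnels every construction of Counts through one public static factory method, which the now-private FACTORY delegates to as well. A minimal self-contained sketch of that arrangement (hypothetical names, not the Hadoop classes):

    // A private constructor reachable only through newInstance(), plus a
    // factory that reuses the same entry point instead of duplicating the
    // constructor call.
    class CountsSketch {
      public static CountsSketch newInstance() {
        return new CountsSketch();
      }
      private CountsSketch() { }
    }

    interface Factory<T> {
      T create();
    }

    class FactoryHolder {
      static final Factory<CountsSketch> FACTORY = CountsSketch::newInstance;
    }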

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java

@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.DstReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithName;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
@@ -369,23 +368,14 @@ public abstract class INode implements Diff.Element<byte[]> {
   /** Compute {@link ContentSummary}. */
   public final ContentSummary computeContentSummary() {
-    final Content.Counts current = computeContentSummary(
-        new Content.CountsMap()).getCounts(Key.CURRENT);
-    return new ContentSummary(current.get(Content.LENGTH),
-        current.get(Content.FILE) + current.get(Content.SYMLINK),
-        current.get(Content.DIRECTORY), getNsQuota(),
-        current.get(Content.DISKSPACE), getDsQuota());
+    final Content.Counts counts = computeContentSummary(
+        Content.Counts.newInstance());
+    return new ContentSummary(counts.get(Content.LENGTH),
+        counts.get(Content.FILE) + counts.get(Content.SYMLINK),
+        counts.get(Content.DIRECTORY), getNsQuota(),
+        counts.get(Content.DISKSPACE), getDsQuota());
   }
 
-  /**
-   * Count subtree content summary with a {@link Content.CountsMap}.
-   *
-   * @param countsMap The subtree counts for returning.
-   * @return The same objects as the counts parameter.
-   */
-  public abstract Content.CountsMap computeContentSummary(
-      Content.CountsMap countsMap);
-
   /**
    * Count subtree content summary with a {@link Content.Counts}.
    *
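
Net effect of the two INode.java hunks: the Content.CountsMap overload disappears from the INode API, and a single Content.Counts accumulator is threaded through the recursive subtree walk. A runnable sketch of the surviving pattern, using hypothetical stand-in types rather than the Hadoop classes:

    import java.util.EnumMap;
    import java.util.List;

    // Stand-ins for Content and Content.Counts.
    enum Kind { FILE, DIRECTORY, LENGTH }

    class Counts {
      private final EnumMap<Kind, Long> map = new EnumMap<>(Kind.class);
      static Counts newInstance() { return new Counts(); }
      void add(Kind k, long delta) { map.merge(k, delta, Long::sum); }
      long get(Kind k) { return map.getOrDefault(k, 0L); }
    }

    abstract class Node {
      // Mirrors the surviving abstract overload: add this subtree's
      // contribution into counts and return the same object.
      abstract Counts computeContentSummary(Counts counts);

      // Mirrors the rewritten entry point: seed one accumulator,
      // recurse once, then read the totals out of it.
      final Counts computeContentSummary() {
        return computeContentSummary(Counts.newInstance());
      }
    }

    class Dir extends Node {
      final List<Node> children;
      Dir(List<Node> children) { this.children = children; }
      @Override Counts computeContentSummary(Counts counts) {
        for (Node c : children) {
          c.computeContentSummary(counts);
        }
        counts.add(Kind.DIRECTORY, 1);
        return counts;
      }
    }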

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectoryWithSnapshot;
@@ -473,16 +472,6 @@ public class INodeDirectory extends INodeWithAdditionalFields {
     return counts;
   }
 
-  @Override
-  public Content.CountsMap computeContentSummary(
-      final Content.CountsMap countsMap) {
-    for (INode child : getChildrenList(null)) {
-      child.computeContentSummary(countsMap);
-    }
-    countsMap.getCounts(Key.CURRENT).add(Content.DIRECTORY, 1);
-    return countsMap;
-  }
-
   /**
    * @param snapshot
    *          if it is not null, get the result from the given snapshot;

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java

@@ -106,15 +106,6 @@ public class INodeDirectoryWithQuota extends INodeDirectory {
     return counts;
   }
 
-  @Override
-  public Content.CountsMap computeContentSummary(
-      final Content.CountsMap countsMap) {
-    final long original = countsMap.sum(Content.DISKSPACE);
-    super.computeContentSummary(countsMap);
-    checkDiskspace(countsMap.sum(Content.DISKSPACE) - original);
-    return countsMap;
-  }
-
   @Override
   public Content.Counts computeContentSummary(
       final Content.Counts counts) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -31,10 +31,9 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockCollection;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
-import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
+import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.FileDiff;
-import org.apache.hadoop.hdfs.server.namenode.snapshot.FileDiffList;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.FileWithSnapshot.Util;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.INodeFileWithSnapshot;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
@@ -359,15 +358,6 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollection {
     return counts;
   }
 
-  @Override
-  public final Content.CountsMap computeContentSummary(
-      final Content.CountsMap countsMap) {
-    computeContentSummary4Snapshot(countsMap.getCounts(Key.SNAPSHOT));
-    computeContentSummary4Current(countsMap.getCounts(Key.CURRENT));
-    return countsMap;
-  }
-
   @Override
   public final Content.Counts computeContentSummary(
       final Content.Counts counts) {

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java

@@ -22,7 +22,6 @@ import java.util.List;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap;
 import org.apache.hadoop.hdfs.server.namenode.Quota.Counts;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 import org.apache.hadoop.hdfs.util.GSet;
@@ -112,11 +111,6 @@ public class INodeMap {
         return null;
       }
 
-      @Override
-      public CountsMap computeContentSummary(CountsMap countsMap) {
-        return null;
-      }
-
       @Override
       public Counts cleanSubtree(Snapshot snapshot, Snapshot prior,
           BlocksMapUpdateInfo collectedBlocks, List<INode> removedINodes)

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java

@@ -269,12 +269,7 @@ public abstract class INodeReference extends INode {
   }
 
-  @Override
-  public final Content.CountsMap computeContentSummary(Content.CountsMap countsMap) {
-    return referred.computeContentSummary(countsMap);
-  }
-
   @Override
-  public final Content.Counts computeContentSummary(Content.Counts counts) {
+  public Content.Counts computeContentSummary(Content.Counts counts) {
     return referred.computeContentSummary(counts);
   }
@@ -462,6 +457,15 @@ public abstract class INodeReference extends INode {
       return lastSnapshotId;
     }
 
+    @Override
+    public final Content.Counts computeContentSummary(Content.Counts counts) {
+      //only count diskspace for WithName
+      final Quota.Counts q = Quota.Counts.newInstance();
+      computeQuotaUsage(q, false, lastSnapshotId);
+      counts.add(Content.DISKSPACE, q.get(Quota.DISKSPACE));
+      return counts;
+    }
+
     @Override
     public final Quota.Counts computeQuotaUsage(Quota.Counts counts,
         boolean useCache, int lastSnapshotId) {
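
The added comment ("only count diskspace for WithName") carries the reasoning: a WithName reference is the snapshot-side name of a renamed inode, and the same inode is still reachable through its current destination path, so recursing into the referred subtree from both names would double-count files, directories and lengths. The override therefore charges only diskspace, derived from computeQuotaUsage() bounded by lastSnapshotId. A toy illustration of that split (hypothetical types, not the Hadoop classes):

    // One file visible through two names after a rename across a snapshot.
    class FileNode {
      final long length = 1024;
      final long diskspace = 3 * 1024; // e.g. replication factor 3
    }

    class Summary {
      long files, length, diskspace;
    }

    // Current path of the renamed file: does the full recount.
    class DstRef {
      final FileNode referred;
      DstRef(FileNode f) { referred = f; }
      void summarize(Summary s) {
        s.files++;
        s.length += referred.length;
        s.diskspace += referred.diskspace;
      }
    }

    // Snapshot-side name: contributes diskspace only (in the patch, the
    // amount is further limited to what is attributable up to
    // lastSnapshotId via computeQuotaUsage).
    class WithNameRef {
      final FileNode referred;
      WithNameRef(FileNode f) { referred = f; }
      void summarize(Summary s) {
        s.diskspace += referred.diskspace; // no files/length: no double count
      }
    }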

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java

@@ -24,7 +24,6 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.Snapshot;
 
 /**
@@ -97,13 +96,6 @@ public class INodeSymlink extends INodeWithAdditionalFields {
     return counts;
   }
 
-  @Override
-  public Content.CountsMap computeContentSummary(
-      final Content.CountsMap countsMap) {
-    computeContentSummary(countsMap.getCounts(Key.CURRENT));
-    return countsMap;
-  }
-
   @Override
   public Content.Counts computeContentSummary(final Content.Counts counts) {
     counts.add(Content.SYMLINK, 1);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectorySnapshottable.java

@@ -37,7 +37,6 @@ import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.Content;
-import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
@@ -347,16 +346,6 @@ public class INodeDirectorySnapshottable extends INodeDirectoryWithSnapshot {
     return counts;
   }
 
-  @Override
-  public Content.CountsMap computeContentSummary(
-      final Content.CountsMap countsMap) {
-    super.computeContentSummary(countsMap);
-    countsMap.getCounts(Key.SNAPSHOT).add(Content.SNAPSHOT,
-        snapshotsByNames.size());
-    countsMap.getCounts(Key.CURRENT).add(Content.SNAPSHOTTABLE_DIRECTORY, 1);
-    return countsMap;
-  }
-
   /**
    * Compute the difference between two snapshots (or a snapshot and the current
    * directory) of the directory.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffReportEntry;
 import org.apache.hadoop.hdfs.protocol.SnapshotDiffReport.DiffType;
 import org.apache.hadoop.hdfs.server.namenode.Content;
-import org.apache.hadoop.hdfs.server.namenode.Content.CountsMap.Key;
 import org.apache.hadoop.hdfs.server.namenode.FSImageSerialization;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
@@ -819,14 +818,6 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
     return counts;
   }
 
-  @Override
-  public Content.CountsMap computeContentSummary(
-      final Content.CountsMap countsMap) {
-    super.computeContentSummary(countsMap);
-    computeContentSummary4Snapshot(countsMap.getCounts(Key.SNAPSHOT));
-    return countsMap;
-  }
-
   @Override
   public Content.Counts computeContentSummary(final Content.Counts counts) {
     super.computeContentSummary(counts);

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/Diff.java

@@ -353,13 +353,50 @@ public class Diff<K, E extends Diff.Element<K>> {
   private static <K, E extends Diff.Element<K>> List<E> apply2Previous(
       final List<E> previous, final List<E> clist, final List<E> dlist) {
-    final List<E> current = new ArrayList<E>(previous);
-    for(E d : dlist) {
-      current.remove(d);
+    // Assumptions:
+    // (A1) All lists are sorted.
+    // (A2) All elements in dlist must be in previous.
+    // (A3) All elements in clist must be not in tmp = previous - dlist.
+    final List<E> tmp = new ArrayList<E>();
+    {
+      // tmp = previous - dlist
+      final Iterator<E> i = previous.iterator();
+      for(E deleted : dlist) {
+        E e = i.next(); //since dlist is non-empty, e must exist by (A2).
+        int cmp = 0;
+        for(; (cmp = e.compareTo(deleted.getKey())) < 0; e = i.next()) {
+          tmp.add(e);
+        }
+        Preconditions.checkState(cmp == 0); // check (A2)
+      }
+      for(; i.hasNext(); ) {
+        tmp.add(i.next());
+      }
     }
-    for(E c : clist) {
-      final int i = search(current, c.getKey());
-      current.add(-i - 1, c);
+
+    final List<E> current = new ArrayList<E>();
+    {
+      // current = tmp + clist
+      final Iterator<E> tmpIterator = tmp.iterator();
+      final Iterator<E> cIterator = clist.iterator();
+      E t = tmpIterator.hasNext()? tmpIterator.next(): null;
+      E c = cIterator.hasNext()? cIterator.next(): null;
+      for(; t != null || c != null; ) {
+        final int cmp = c == null? 1
+            : t == null? -1
+            : c.compareTo(t.getKey());
+        if (cmp < 0) {
+          current.add(c);
+          c = cIterator.hasNext()? cIterator.next(): null;
+        } else if (cmp > 0) {
+          current.add(t);
+          t = tmpIterator.hasNext()? tmpIterator.next(): null;
+        } else {
+          throw new AssertionError("Violated assumption (A3).");
+        }
+      }
     }
     return current;
   }
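
This hunk replaces a quadratic apply2Previous (a List.remove() per deleted element, then a binary search plus mid-list insertion per created element) with two linear merge passes over the already-sorted inputs; the TestDiff change below raises startSize to 10000, which the old version would have handled far more slowly. A standalone sketch of the same two-pass logic, specialized to sorted Integer lists so it runs as-is, under the patch's assumptions (A1)-(A3):

    import java.util.ArrayList;
    import java.util.Iterator;
    import java.util.List;

    class Apply2PreviousSketch {
      static List<Integer> apply2Previous(List<Integer> previous,
          List<Integer> clist, List<Integer> dlist) {
        // Pass 1: tmp = previous - dlist in one scan (A1, A2).
        final List<Integer> tmp = new ArrayList<>();
        final Iterator<Integer> it = previous.iterator();
        for (Integer deleted : dlist) {
          Integer e = it.next(); // must exist by (A2)
          while (e.compareTo(deleted) < 0) {
            tmp.add(e);
            e = it.next();
          }
          if (e.compareTo(deleted) != 0) { // check (A2)
            throw new IllegalStateException("dlist element not in previous");
          }
        }
        while (it.hasNext()) {
          tmp.add(it.next());
        }

        // Pass 2: current = merge of tmp and clist; an equal pair of keys
        // would violate (A3).
        final List<Integer> current = new ArrayList<>();
        final Iterator<Integer> ti = tmp.iterator(), ci = clist.iterator();
        Integer t = ti.hasNext() ? ti.next() : null;
        Integer c = ci.hasNext() ? ci.next() : null;
        while (t != null || c != null) {
          final int cmp = c == null ? 1 : t == null ? -1 : c.compareTo(t);
          if (cmp < 0) {
            current.add(c);
            c = ci.hasNext() ? ci.next() : null;
          } else if (cmp > 0) {
            current.add(t);
            t = ti.hasNext() ? ti.next() : null;
          } else {
            throw new AssertionError("Violated assumption (A3).");
          }
        }
        return current;
      }

      public static void main(String[] args) {
        // previous = [1,3,5,7], delete {3,7}, create {2,6} -> [1, 2, 5, 6]
        System.out.println(apply2Previous(
            List.of(1, 3, 5, 7), List.of(2, 6), List.of(3, 7)));
      }
    }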

hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestDiff.java

@@ -48,7 +48,7 @@ public class TestDiff {
   /** Test directory diff. */
   @Test(timeout=60000)
   public void testDiff() throws Exception {
-    for(int startSize = 0; startSize <= 1000; startSize = nextStep(startSize)) {
+    for(int startSize = 0; startSize <= 10000; startSize = nextStep(startSize)) {
       for(int m = 0; m <= 10000; m = nextStep(m)) {
         runDiffTest(startSize, m);
       }