diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 0782e9d0f78..faeb6a2008d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -838,6 +838,9 @@ Release 2.1.0-beta - UNRELEASED HDFS-4850. Fix OfflineImageViewer to work on fsimages with empty files or snapshots. (jing9) + HDFS-4877. Snapshot: fix the scenario where a directory is renamed under + its prior descendant. (jing9) + Release 2.0.5-alpha - UNRELEASED INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java index 3820a4e8ed6..7539362064e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java @@ -678,7 +678,7 @@ public class FSDirectory implements Closeable { Quota.Counts.newInstance(), false, Snapshot.INVALID_ID); newSrcCounts.subtract(oldSrcCounts); srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE), - newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID); + newSrcCounts.get(Quota.DISKSPACE), false); } return true; @@ -944,8 +944,8 @@ public class FSDirectory implements Closeable { BlocksMapUpdateInfo collectedBlocks = new BlocksMapUpdateInfo(); List removedINodes = new ArrayList(); filesDeleted = removedDst.cleanSubtree(null, - dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes).get( - Quota.NAMESPACE); + dstIIP.getLatestSnapshot(), collectedBlocks, removedINodes, true) + .get(Quota.NAMESPACE); getFSNamesystem().removePathAndBlocks(src, collectedBlocks, removedINodes); } @@ -963,7 +963,7 @@ public class FSDirectory implements Closeable { Quota.Counts.newInstance(), false, Snapshot.INVALID_ID); 
newSrcCounts.subtract(oldSrcCounts); srcParent.addSpaceConsumed(newSrcCounts.get(Quota.NAMESPACE), - newSrcCounts.get(Quota.DISKSPACE), false, Snapshot.INVALID_ID); + newSrcCounts.get(Quota.DISKSPACE), false); } return filesDeleted >= 0; @@ -1408,9 +1408,9 @@ public class FSDirectory implements Closeable { targetNode.destroyAndCollectBlocks(collectedBlocks, removedINodes); } else { Quota.Counts counts = targetNode.cleanSubtree(null, latestSnapshot, - collectedBlocks, removedINodes); + collectedBlocks, removedINodes, true); parent.addSpaceConsumed(-counts.get(Quota.NAMESPACE), - -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID); + -counts.get(Quota.DISKSPACE), true); removed = counts.get(Quota.NAMESPACE); } if (NameNode.stateChangeLog.isDebugEnabled()) { @@ -1797,7 +1797,8 @@ public class FSDirectory implements Closeable { final INode[] inodes = inodesInPath.getINodes(); for(int i=0; i < numOfINodes; i++) { if (inodes[i].isQuotaSet()) { // a directory with quota - INodeDirectoryWithQuota node =(INodeDirectoryWithQuota)inodes[i]; + INodeDirectoryWithQuota node = (INodeDirectoryWithQuota) inodes[i] + .asDirectory(); node.addSpaceConsumed2Cache(nsDelta, dsDelta); } } @@ -2033,7 +2034,8 @@ public class FSDirectory implements Closeable { } if (inodes[i].isQuotaSet()) { // a directory with quota try { - ((INodeDirectoryWithQuota)inodes[i]).verifyQuota(nsDelta, dsDelta); + ((INodeDirectoryWithQuota) inodes[i].asDirectory()).verifyQuota( + nsDelta, dsDelta); } catch (QuotaExceededException e) { e.setPathName(getFullPathName(inodes, i)); throw e; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java index 2cc2a9931e1..6c6531c3f2b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java @@ -342,12 +342,13 @@ public abstract class INode implements Diff.Element { * deletion/update will be added to the given map. * @param removedINodes * INodes collected from the descents for further cleaning up of - * inodeMap + * inodeMap * @return quota usage delta when deleting a snapshot */ public abstract Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, BlocksMapUpdateInfo collectedBlocks, - List removedINodes) throws QuotaExceededException; + List removedINodes, boolean countDiffChange) + throws QuotaExceededException; /** * Destroy self and clear everything! If the INode is a file, this method @@ -388,17 +389,10 @@ public abstract class INode implements Diff.Element { * Check and add namespace/diskspace consumed to itself and the ancestors. * @throws QuotaExceededException if quote is violated. */ - public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify, - int snapshotId) throws QuotaExceededException { + public void addSpaceConsumed(long nsDelta, long dsDelta, boolean verify) + throws QuotaExceededException { if (parent != null) { - parent.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId); - } - } - - public void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta, - boolean verify, int snapshotId) throws QuotaExceededException { - if (parent != null) { - parent.addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId); + parent.addSpaceConsumed(nsDelta, dsDelta, verify); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java index 78aa46b30fd..7e72fa32286 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java +++ 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java @@ -23,6 +23,7 @@ import java.util.ArrayList; import java.util.Collections; import java.util.Iterator; import java.util.List; +import java.util.Map; import org.apache.hadoop.fs.PathIsNotDirectoryException; import org.apache.hadoop.fs.UnresolvedLinkException; @@ -499,7 +500,8 @@ public class INodeDirectory extends INodeWithAdditionalFields { /** Call cleanSubtree(..) recursively down the subtree. */ public Quota.Counts cleanSubtreeRecursively(final Snapshot snapshot, Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, - final List removedINodes) throws QuotaExceededException { + final List removedINodes, final Map excludedNodes, + final boolean countDiffChange) throws QuotaExceededException { Quota.Counts counts = Quota.Counts.newInstance(); // in case of deletion snapshot, since this call happens after we modify // the diff list, the snapshot to be deleted has been combined or renamed @@ -508,9 +510,14 @@ public class INodeDirectory extends INodeWithAdditionalFields { // INodeDirectoryWithSnapshot#cleanSubtree) Snapshot s = snapshot != null && prior != null ? 
prior : snapshot; for (INode child : getChildrenList(s)) { - Quota.Counts childCounts = child.cleanSubtree(snapshot, prior, - collectedBlocks, removedINodes); - counts.add(childCounts); + if (snapshot != null && excludedNodes != null + && excludedNodes.containsKey(child)) { + continue; + } else { + Quota.Counts childCounts = child.cleanSubtree(snapshot, prior, + collectedBlocks, removedINodes, countDiffChange); + counts.add(childCounts); + } } return counts; } @@ -527,8 +534,9 @@ public class INodeDirectory extends INodeWithAdditionalFields { @Override public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, - final BlocksMapUpdateInfo collectedBlocks, - final List removedINodes) throws QuotaExceededException { + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, final boolean countDiffChange) + throws QuotaExceededException { if (prior == null && snapshot == null) { // destroy the whole subtree and collect blocks that should be deleted Quota.Counts counts = Quota.Counts.newInstance(); @@ -538,7 +546,7 @@ public class INodeDirectory extends INodeWithAdditionalFields { } else { // process recursively down the subtree Quota.Counts counts = cleanSubtreeRecursively(snapshot, prior, - collectedBlocks, removedINodes); + collectedBlocks, removedINodes, null, countDiffChange); if (isQuotaSet()) { ((INodeDirectoryWithQuota) this).addSpaceConsumed2Cache( -counts.get(Quota.NAMESPACE), -counts.get(Quota.DISKSPACE)); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java index b623fe5c06e..f2b607ccaa7 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java @@ -132,7 
+132,7 @@ public class INodeDirectoryWithQuota extends INodeDirectory { @Override public final void addSpaceConsumed(final long nsDelta, final long dsDelta, - boolean verify, int snapshotId) throws QuotaExceededException { + boolean verify) throws QuotaExceededException { if (isQuotaSet()) { // The following steps are important: // check quotas in this inode and all ancestors before changing counts @@ -143,11 +143,11 @@ public class INodeDirectoryWithQuota extends INodeDirectory { verifyQuota(nsDelta, dsDelta); } // (2) verify quota and then add count in ancestors - super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId); + super.addSpaceConsumed(nsDelta, dsDelta, verify); // (3) add count in this inode addSpaceConsumed2Cache(nsDelta, dsDelta); } else { - super.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId); + super.addSpaceConsumed(nsDelta, dsDelta, verify); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java index 2ffc71c16db..ff3f9ca5910 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java @@ -286,7 +286,8 @@ public class INodeFile extends INodeWithAdditionalFields implements BlockCollect @Override public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, - final BlocksMapUpdateInfo collectedBlocks, final List removedINodes) + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { Quota.Counts counts = Quota.Counts.newInstance(); if (snapshot == null && prior == null) { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java index a5c4d6f51bd..ca9e10170fc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeMap.java @@ -113,8 +113,8 @@ public class INodeMap { @Override public Counts cleanSubtree(Snapshot snapshot, Snapshot prior, - BlocksMapUpdateInfo collectedBlocks, List removedINodes) - throws QuotaExceededException { + BlocksMapUpdateInfo collectedBlocks, List removedINodes, + boolean countDiffChange) throws QuotaExceededException { return null; } }; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java index 3e9c71e2976..22d0a487e0a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeReference.java @@ -254,10 +254,10 @@ public abstract class INodeReference extends INode { @Override // used by WithCount public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior, - BlocksMapUpdateInfo collectedBlocks, final List removedINodes) - throws QuotaExceededException { + BlocksMapUpdateInfo collectedBlocks, final List removedINodes, + final boolean countDiffChange) throws QuotaExceededException { return referred.cleanSubtree(snapshot, prior, collectedBlocks, - removedINodes); + removedINodes, countDiffChange); } @Override // used by WithCount @@ -396,29 +396,6 @@ public abstract class INodeReference extends INode { return withNameList.get(-i - 2); } } - - @Override - public final void addSpaceConsumed(long nsDelta, long dsDelta, - boolean verify, int snapshotId) throws QuotaExceededException { - 
INodeReference parentRef = getParentReference(); - if (parentRef != null) { - parentRef.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId); - } - addSpaceConsumedToRenameSrc(nsDelta, dsDelta, verify, snapshotId); - } - - @Override - public final void addSpaceConsumedToRenameSrc(long nsDelta, long dsDelta, - boolean verify, int snapshotId) throws QuotaExceededException { - if (snapshotId != Snapshot.INVALID_ID) { - for (INodeReference.WithName withName : withNameList) { - if (withName.getLastSnapshotId() >= snapshotId) { - withName.addSpaceConsumed(nsDelta, dsDelta, verify, snapshotId); - break; - } - } - } - } } /** A reference with a fixed name. */ @@ -476,15 +453,20 @@ public abstract class INodeReference extends INode { Preconditions.checkState(this.lastSnapshotId >= lastSnapshotId); final INode referred = this.getReferredINode().asReference() .getReferredINode(); - // we cannot use cache for the referred node since its cached quota may - // have already been updated by changes in the current tree - return referred.computeQuotaUsage(counts, false, this.lastSnapshotId); + // We will continue the quota usage computation using the same snapshot id + // as time line (if the given snapshot id is valid). Also, we cannot use + // cache for the referred node since its cached quota may have already + // been updated by changes in the current tree. + int id = lastSnapshotId > Snapshot.INVALID_ID ? 
+ lastSnapshotId : this.lastSnapshotId; + return referred.computeQuotaUsage(counts, false, id); } @Override public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, - final List removedINodes) throws QuotaExceededException { + final List removedINodes, final boolean countDiffChange) + throws QuotaExceededException { // since WithName node resides in deleted list acting as a snapshot copy, // the parameter snapshot must be non-null Preconditions.checkArgument(snapshot != null); @@ -500,11 +482,19 @@ public abstract class INodeReference extends INode { } Quota.Counts counts = getReferredINode().cleanSubtree(snapshot, prior, - collectedBlocks, removedINodes); + collectedBlocks, removedINodes, false); INodeReference ref = getReferredINode().getParentReference(); if (ref != null) { ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE), - -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID); + -counts.get(Quota.DISKSPACE), true); + } + + if (snapshot.getId() < lastSnapshotId) { + // for a WithName node, when we compute its quota usage, we only count + // in all the nodes existing at the time of the corresponding rename op. + // Thus if we are deleting a snapshot before/at the snapshot associated + // with lastSnapshotId, we do not need to update the quota upwards. + counts = Quota.Counts.newInstance(); } return counts; } @@ -529,16 +519,17 @@ public abstract class INodeReference extends INode { // 1. create snapshot s1 on /test // 2. rename /test/foo/bar to /test/foo2/bar // 3. create snapshot s2 on /test - // 4. delete snapshot s2 + // 4. rename foo2 again + // 5. 
delete snapshot s2 return; } try { Quota.Counts counts = referred.cleanSubtree(snapshot, prior, - collectedBlocks, removedINodes); + collectedBlocks, removedINodes, false); INodeReference ref = getReferredINode().getParentReference(); if (ref != null) { ref.addSpaceConsumed(-counts.get(Quota.NAMESPACE), - -counts.get(Quota.DISKSPACE), true, Snapshot.INVALID_ID); + -counts.get(Quota.DISKSPACE), true); } } catch (QuotaExceededException e) { LOG.error("should not exceed quota while snapshot deletion", e); @@ -588,8 +579,8 @@ public abstract class INodeReference extends INode { @Override public Quota.Counts cleanSubtree(Snapshot snapshot, Snapshot prior, - BlocksMapUpdateInfo collectedBlocks, List removedINodes) - throws QuotaExceededException { + BlocksMapUpdateInfo collectedBlocks, List removedINodes, + final boolean countDiffChange) throws QuotaExceededException { if (snapshot == null && prior == null) { Quota.Counts counts = Quota.Counts.newInstance(); this.computeQuotaUsage(counts, true); @@ -609,7 +600,7 @@ public abstract class INodeReference extends INode { return Quota.Counts.newInstance(); } return getReferredINode().cleanSubtree(snapshot, prior, - collectedBlocks, removedINodes); + collectedBlocks, removedINodes, countDiffChange); } } @@ -648,8 +639,11 @@ public abstract class INodeReference extends INode { sfile.deleteCurrentFile(); if (snapshot != null) { try { + // when calling cleanSubtree of the referred node, since we + // compute quota usage updates before calling this destroy + // function, we use true for countDiffChange referred.cleanSubtree(snapshot, prior, collectedBlocks, - removedINodes); + removedINodes, true); } catch (QuotaExceededException e) { LOG.error("should not exceed quota while snapshot deletion", e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java index 
653ec8d3d97..ba41a2ecdb0 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java @@ -76,7 +76,8 @@ public class INodeSymlink extends INodeWithAdditionalFields { @Override public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, - final BlocksMapUpdateInfo collectedBlocks, final List removedINodes) { + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, final boolean countDiffChange) { if (snapshot == null && prior == null) { destroyAndCollectBlocks(collectedBlocks, removedINodes); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java index 88879cedc68..23ca4ea9e73 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/AbstractINodeDiffList.java @@ -68,7 +68,8 @@ abstract class AbstractINodeDiffList removedINodes) + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, boolean countDiffChange) throws QuotaExceededException { int snapshotIndex = Collections.binarySearch(diffs, snapshot.getId()); @@ -80,14 +81,14 @@ abstract class AbstractINodeDiffList removedINodes) + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { Quota.Counts counts = Quota.Counts.newInstance(); + Map priorCreated = null; + Map priorDeleted = null; if (snapshot == null) { // delete the current directory recordModification(prior, null); // delete everything in created list @@ -706,8 +711,28 @@ public class 
INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { } else { // update prior prior = getDiffs().updatePrior(snapshot, prior); + // if there is a snapshot diff associated with prior, we need to record + // its original created and deleted list before deleting post + if (prior != null) { + DirectoryDiff priorDiff = this.getDiffs().getDiff(prior); + if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { + List cList = priorDiff.diff.getList(ListType.CREATED); + List dList = priorDiff.diff.getList(ListType.DELETED); + priorCreated = new HashMap(cList.size()); + for (INode cNode : cList) { + priorCreated.put(cNode, cNode); + } + priorDeleted = new HashMap(dList.size()); + for (INode dNode : dList) { + priorDeleted.put(dNode, dNode); + } + } + } + counts.add(getDiffs().deleteSnapshotDiff(snapshot, prior, this, - collectedBlocks, removedINodes)); + collectedBlocks, removedINodes, countDiffChange)); + + // check priorDiff again since it may be created during the diff deletion if (prior != null) { DirectoryDiff priorDiff = this.getDiffs().getDiff(prior); if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { @@ -716,11 +741,17 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { // use null as prior in the cleanSubtree call. Files/directories that // were created before "prior" will be covered by the later // cleanSubtreeRecursively call. 
- for (INode cNode : priorDiff.getChildrenDiff().getList( - ListType.CREATED)) { - counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks, - removedINodes)); + if (priorCreated != null) { + // we only check the node originally in prior's created list + for (INode cNode : priorDiff.getChildrenDiff().getList( + ListType.CREATED)) { + if (priorCreated.containsKey(cNode)) { + counts.add(cNode.cleanSubtree(snapshot, null, collectedBlocks, + removedINodes, countDiffChange)); + } + } } + // When a directory is moved from the deleted list of the posterior // diff to the deleted list of this diff, we need to destroy its // descendants that were 1) created after taking this diff and 2) @@ -728,16 +759,19 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { // For files moved from posterior's deleted list, we also need to // delete its snapshot copy associated with the posterior snapshot. + for (INode dNode : priorDiff.getChildrenDiff().getList( ListType.DELETED)) { - counts.add(cleanDeletedINode(dNode, snapshot, prior, - collectedBlocks, removedINodes)); + if (priorDeleted == null || !priorDeleted.containsKey(dNode)) { + counts.add(cleanDeletedINode(dNode, snapshot, prior, + collectedBlocks, removedINodes, countDiffChange)); + } } } } } counts.add(cleanSubtreeRecursively(snapshot, prior, collectedBlocks, - removedINodes)); + removedINodes, priorDeleted, countDiffChange)); if (isQuotaSet()) { this.addSpaceConsumed2Cache(-counts.get(Quota.NAMESPACE), @@ -755,9 +789,11 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { * @param collectedBlocks Used to collect blocks for later deletion. * @return Quota usage update. 
*/ - private static Quota.Counts cleanDeletedINode(INode inode, final Snapshot post, - final Snapshot prior, final BlocksMapUpdateInfo collectedBlocks, - final List removedINodes) throws QuotaExceededException { + private static Quota.Counts cleanDeletedINode(INode inode, + final Snapshot post, final Snapshot prior, + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, final boolean countDiffChange) + throws QuotaExceededException { Quota.Counts counts = Quota.Counts.newInstance(); Deque queue = new ArrayDeque(); queue.addLast(inode); @@ -766,7 +802,8 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { if (topNode instanceof INodeReference.WithName) { INodeReference.WithName wn = (INodeReference.WithName) topNode; if (wn.getLastSnapshotId() >= post.getId()) { - wn.cleanSubtree(post, prior, collectedBlocks, removedINodes); + wn.cleanSubtree(post, prior, collectedBlocks, removedINodes, + countDiffChange); } // For DstReference node, since the node is not in the created list of // prior, we should treat it as regular file/dir @@ -774,20 +811,28 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { && topNode.asFile() instanceof FileWithSnapshot) { FileWithSnapshot fs = (FileWithSnapshot) topNode.asFile(); counts.add(fs.getDiffs().deleteSnapshotDiff(post, prior, - topNode.asFile(), collectedBlocks, removedINodes)); + topNode.asFile(), collectedBlocks, removedINodes, countDiffChange)); } else if (topNode.isDirectory()) { INodeDirectory dir = topNode.asDirectory(); + ChildrenDiff priorChildrenDiff = null; if (dir instanceof INodeDirectoryWithSnapshot) { // delete files/dirs created after prior. Note that these // files/dirs, along with inode, were deleted right after post. 
INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) dir; DirectoryDiff priorDiff = sdir.getDiffs().getDiff(prior); if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { - counts.add(priorDiff.diff.destroyCreatedList(sdir, + priorChildrenDiff = priorDiff.getChildrenDiff(); + counts.add(priorChildrenDiff.destroyCreatedList(sdir, collectedBlocks, removedINodes)); } } + for (INode child : dir.getChildrenList(prior)) { + if (priorChildrenDiff != null + && priorChildrenDiff.search(ListType.DELETED, + child.getLocalNameBytes()) != null) { + continue; + } queue.addLast(child); } } @@ -864,29 +909,39 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota { if (inode instanceof INodeReference.WithName && snapshot != null) { // this inode has been renamed before the deletion of the DstReference // subtree - inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes); + inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, + true); } else { // for DstReference node, continue this process to its subtree destroyDstSubtree(inode.asReference().getReferredINode(), snapshot, prior, collectedBlocks, removedINodes); } } else if (inode.isFile() && snapshot != null) { - inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes); + inode.cleanSubtree(snapshot, prior, collectedBlocks, removedINodes, true); } else if (inode.isDirectory()) { + Map excludedNodes = null; if (inode instanceof INodeDirectoryWithSnapshot) { INodeDirectoryWithSnapshot sdir = (INodeDirectoryWithSnapshot) inode; DirectoryDiffList diffList = sdir.getDiffs(); if (snapshot != null) { diffList.deleteSnapshotDiff(snapshot, prior, sdir, collectedBlocks, - removedINodes); + removedINodes, true); } DirectoryDiff priorDiff = diffList.getDiff(prior); if (priorDiff != null && priorDiff.getSnapshot().equals(prior)) { priorDiff.diff.destroyCreatedList(sdir, collectedBlocks, removedINodes); + List dList = priorDiff.diff.getList(ListType.DELETED); + 
excludedNodes = new HashMap(dList.size()); + for (INode dNode : dList) { + excludedNodes.put(dNode, dNode); + } } } for (INode child : inode.asDirectory().getChildrenList(prior)) { + if (excludedNodes != null && excludedNodes.containsKey(child)) { + continue; + } destroyDstSubtree(child, snapshot, prior, collectedBlocks, removedINodes); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java index 80e3ca8d0d2..28f33123b56 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileUnderConstructionWithSnapshot.java @@ -104,7 +104,8 @@ public class INodeFileUnderConstructionWithSnapshot @Override public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, - final BlocksMapUpdateInfo collectedBlocks, final List removedINodes) + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { if (snapshot == null) { // delete the current file recordModification(prior, null); @@ -114,7 +115,7 @@ public class INodeFileUnderConstructionWithSnapshot } else { // delete a snapshot prior = getDiffs().updatePrior(snapshot, prior); return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks, - removedINodes); + removedINodes, countDiffChange); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java index 06e833b096a..7f37666f880 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeFileWithSnapshot.java @@ -91,7 +91,8 @@ public class INodeFileWithSnapshot extends INodeFile @Override public Quota.Counts cleanSubtree(final Snapshot snapshot, Snapshot prior, - final BlocksMapUpdateInfo collectedBlocks, final List removedINodes) + final BlocksMapUpdateInfo collectedBlocks, + final List removedINodes, final boolean countDiffChange) throws QuotaExceededException { if (snapshot == null) { // delete the current file recordModification(prior, null); @@ -101,7 +102,7 @@ public class INodeFileWithSnapshot extends INodeFile } else { // delete a snapshot prior = getDiffs().updatePrior(snapshot, prior); return diffs.deleteSnapshotDiff(snapshot, prior, this, collectedBlocks, - removedINodes); + removedINodes, countDiffChange); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java index 9c2279fdf1c..90bfa26ed71 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java @@ -454,7 +454,7 @@ public class TestRenameWithSnapshots { // delete snapshot s5. 
The diff of s5 should be combined to s4 hdfs.deleteSnapshot(sdir1, "s5"); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); assertFalse(hdfs.exists(bar2_s5)); final Path bar2_s4 = SnapshotTestHelper.getSnapshotPath(sdir1, "s4", "foo/bar2"); @@ -492,7 +492,7 @@ public class TestRenameWithSnapshots { assertFalse(hdfs.exists(bar3_s2)); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // delete snapshot s2. hdfs.deleteSnapshot(sdir2, "s2"); @@ -500,14 +500,15 @@ public class TestRenameWithSnapshots { assertFalse(hdfs.exists(bar2_s2)); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); hdfs.deleteSnapshot(sdir1, "s3"); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); hdfs.deleteSnapshot(sdir1, "s1"); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); } - private void restartClusterAndCheckImage() throws IOException { + private void restartClusterAndCheckImage(boolean compareQuota) + throws IOException { File fsnBefore = new File(testDir, "dumptree_before"); File fsnMiddle = new File(testDir, "dumptree_middle"); File fsnAfter = new File(testDir, "dumptree_after"); @@ -538,8 +539,10 @@ public class TestRenameWithSnapshots { // dump the namespace loaded from fsimage SnapshotTestHelper.dumpTree2File(fsdir, fsnAfter); - SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle, true); - SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, true); + SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnMiddle, + compareQuota); + SnapshotTestHelper.compareDumpedTreeInFile(fsnBefore, fsnAfter, + compareQuota); } /** @@ -579,7 +582,7 @@ public class TestRenameWithSnapshots { // delete snapshot s5. 
hdfs.deleteSnapshot(sdir1, "s5"); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); assertFalse(hdfs.exists(foo_s5)); status = hdfs.getFileStatus(foo_s4); assertEquals(REPL_1, status.getReplication()); @@ -604,18 +607,18 @@ public class TestRenameWithSnapshots { .getLocalName()); // restart cluster - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // delete snapshot s2. hdfs.deleteSnapshot(sdir2, "s2"); assertFalse(hdfs.exists(foo_s2)); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); hdfs.deleteSnapshot(sdir1, "s3"); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); hdfs.deleteSnapshot(sdir1, "s1"); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); } /** @@ -650,7 +653,7 @@ public class TestRenameWithSnapshots { hdfs.rename(bar2_dir1, bar2_dir2); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // modification on /dir2/foo and /dir2/bar final Path bar1_dir2 = new Path(foo_dir2, "bar1"); @@ -686,7 +689,7 @@ public class TestRenameWithSnapshots { hdfs.rename(bar2_dir2, bar2_dir3); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // modification on /dir3/foo and /dir3/bar final Path bar1_dir3 = new Path(foo_dir3, "bar1"); @@ -718,7 +721,7 @@ public class TestRenameWithSnapshots { hdfs.rename(bar2_dir3, bar2_dir2); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // modification on /dir2/foo hdfs.setReplication(bar1_dir2, REPL); @@ -773,14 +776,14 @@ public class TestRenameWithSnapshots { .getLocalName()); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // delete foo hdfs.delete(foo_dir1, true); hdfs.delete(bar2_dir1, true); // restart the cluster and check fsimage - 
restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // check assertTrue(hdfs.exists(bar1_s1)); @@ -844,7 +847,7 @@ public class TestRenameWithSnapshots { hdfs.setReplication(bar_dir2, REPL_1); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // create snapshots SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s11"); @@ -863,7 +866,7 @@ public class TestRenameWithSnapshots { hdfs.setReplication(bar_dir3, REPL_2); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // create snapshots SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s111"); @@ -917,7 +920,7 @@ public class TestRenameWithSnapshots { hdfs.setReplication(bar_dir2, REPL); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // create snapshots SnapshotTestHelper.createSnapshot(hdfs, sdir1, "s1111"); @@ -999,14 +1002,14 @@ public class TestRenameWithSnapshots { assertEquals("s1", barDiffs.get(0).snapshot.getRoot().getLocalName()); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // delete foo hdfs.delete(foo_dir1, true); hdfs.delete(bar_dir1, true); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // check final Path bar1_s1111 = SnapshotTestHelper.getSnapshotPath(sdir1, "s1111", @@ -1127,7 +1130,7 @@ public class TestRenameWithSnapshots { hdfs.rename(foo, newfoo); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); final Path bar2 = new Path(newfoo, "bar2"); DFSTestUtil.createFile(hdfs, bar2, BLOCKSIZE, REPL, SEED); @@ -1145,7 +1148,7 @@ public class TestRenameWithSnapshots { // delete snapshot s4. 
The diff of s4 should be combined to s3 hdfs.deleteSnapshot(sdir1, "s4"); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); Path bar_s3 = SnapshotTestHelper.getSnapshotPath(sdir1, "s3", "foo/bar"); assertFalse(hdfs.exists(bar_s3)); @@ -1175,18 +1178,18 @@ public class TestRenameWithSnapshots { assertEquals("s2", diffs.get(0).snapshot.getRoot().getLocalName()); // restart the cluster and check fsimage - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // delete snapshot s2. hdfs.deleteSnapshot(sdir2, "s2"); assertFalse(hdfs.exists(bar_s2)); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); // make sure the whole referred subtree has been destroyed assertEquals(4, fsdir.getRoot().getNamespace()); assertEquals(0, fsdir.getRoot().getDiskspace()); hdfs.deleteSnapshot(sdir1, "s1"); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); assertEquals(3, fsdir.getRoot().getNamespace()); assertEquals(0, fsdir.getRoot().getDiskspace()); } @@ -1232,7 +1235,7 @@ public class TestRenameWithSnapshots { INode fooNode = fooRef.asFile(); assertTrue(fooNode instanceof INodeFileWithSnapshot); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); } /** @@ -1724,7 +1727,7 @@ public class TestRenameWithSnapshots { cluster = new MiniDFSCluster.Builder(conf).format(false) .numDataNodes(REPL).build(); cluster.waitActive(); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); } /** @@ -1784,7 +1787,108 @@ public class TestRenameWithSnapshots { final Path foo2 = new Path(bar2, "foo"); hdfs.rename(foo, foo2); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); + + // delete snap1 + hdfs.deleteSnapshot(sdir1, snap1); + + restartClusterAndCheckImage(true); + } + + /** + * move a directory to its prior descendant + */ + @Test + public void testRename2PreDescendant_2() throws Exception { + final Path root = new Path("/"); + final Path
sdir1 = new Path("/dir1"); + final Path sdir2 = new Path("/dir2"); + final Path foo = new Path(sdir1, "foo"); + final Path bar = new Path(foo, "bar"); + final Path file1InBar = new Path(bar, "file1"); + final Path file2InBar = new Path(bar, "file2"); + hdfs.mkdirs(bar); + hdfs.mkdirs(sdir2); + DFSTestUtil.createFile(hdfs, file1InBar, BLOCKSIZE, REPL, SEED); + DFSTestUtil.createFile(hdfs, file2InBar, BLOCKSIZE, REPL, SEED); + + hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + + // create snapshot on root + SnapshotTestHelper.createSnapshot(hdfs, root, snap1); + // delete file1InBar + hdfs.delete(file1InBar, true); + + // create another snapshot on root + SnapshotTestHelper.createSnapshot(hdfs, root, snap2); + // delete file2InBar + hdfs.delete(file2InBar, true); + + // /dir1/foo/bar -> /dir2/bar + final Path bar2 = new Path(sdir2, "bar2"); + hdfs.rename(bar, bar2); + + // /dir1/foo -> /dir2/bar/foo + final Path foo2 = new Path(bar2, "foo2"); + hdfs.rename(foo, foo2); + + restartClusterAndCheckImage(true); + + // delete snapshot snap2 + hdfs.deleteSnapshot(root, snap2); + + // after deleting snap2, the WithName node "bar", which originally was + // stored in the deleted list of "foo" for snap2, is moved to its deleted + // list for snap1. In that case, it will not be counted when calculating + // quota for "foo". However, we do not update this quota usage change while + // deleting snap2.
+ restartClusterAndCheckImage(false); + } + + /** + * move a directory to its prior descendant + */ + @Test + public void testRename2PreDescendant_3() throws Exception { + final Path root = new Path("/"); + final Path sdir1 = new Path("/dir1"); + final Path sdir2 = new Path("/dir2"); + final Path foo = new Path(sdir1, "foo"); + final Path bar = new Path(foo, "bar"); + final Path fileInBar = new Path(bar, "file"); + hdfs.mkdirs(bar); + hdfs.mkdirs(sdir2); + DFSTestUtil.createFile(hdfs, fileInBar, BLOCKSIZE, REPL, SEED); + + hdfs.setQuota(sdir1, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + hdfs.setQuota(sdir2, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + hdfs.setQuota(foo, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + hdfs.setQuota(bar, Long.MAX_VALUE - 1, Long.MAX_VALUE - 1); + + // create snapshot on root + SnapshotTestHelper.createSnapshot(hdfs, root, snap1); + // delete fileInBar + hdfs.delete(fileInBar, true); + // create another snapshot on root + SnapshotTestHelper.createSnapshot(hdfs, root, snap2); + + // /dir1/foo/bar -> /dir2/bar + final Path bar2 = new Path(sdir2, "bar2"); + hdfs.rename(bar, bar2); + + // /dir1/foo -> /dir2/bar/foo + final Path foo2 = new Path(bar2, "foo2"); + hdfs.rename(foo, foo2); + + restartClusterAndCheckImage(true); + + // delete snapshot snap1 + hdfs.deleteSnapshot(root, snap1); + + restartClusterAndCheckImage(true); } /** @@ -1851,7 +1955,7 @@ public class TestRenameWithSnapshots { assertEquals(0, diff.getList(ListType.CREATED).size()); assertEquals(0, diff.getList(ListType.DELETED).size()); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); } /** @@ -1928,7 +2032,7 @@ public class TestRenameWithSnapshots { assertSame(wc, wc2); assertSame(fooRef2, wc.getParentReference()); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); } /** @@ -2041,7 +2145,7 @@ public class TestRenameWithSnapshots { .isEmpty()); assertTrue(barNode.getChildrenList(null).isEmpty()); - restartClusterAndCheckImage(); +
restartClusterAndCheckImage(true); } /** @@ -2137,6 +2241,6 @@ public class TestRenameWithSnapshots { assertSame(fooNode.asReference().getReferredINode(), fooNode_s2.getReferredINode()); - restartClusterAndCheckImage(); + restartClusterAndCheckImage(true); } }