diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 265260203ed..86aa66f9ea2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -347,3 +347,7 @@ Branch-2802 Snapshot (Unreleased)
 
   HDFS-4802. Disallowing snapshot on / twice should throw SnapshotException
   but not IllegalStateException. (Jing Zhao via szetszwo)
+
+  HDFS-4806. In INodeDirectoryWithSnapshot, use isInLatestSnapshot() to
+  determine if an added/removed child should be recorded in the snapshot diff.
+  (Jing Zhao via szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index 7a78b9e4227..da3443c9e6a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -550,7 +550,7 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       final INodeMap inodeMap) throws QuotaExceededException {
     ChildrenDiff diff = null;
     Integer undoInfo = null;
-    if (latest != null) {
+    if (isInLatestSnapshot(latest)) {
       diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
       undoInfo = diff.create(inode);
     }
@@ -566,7 +566,18 @@ public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
       final INodeMap inodeMap) throws QuotaExceededException {
     ChildrenDiff diff = null;
     UndoInfo undoInfo = null;
-    if (latest != null) {
+    // For a directory that is not a renamed node, if isInLatestSnapshot
+    // returns false, the directory is not in the latest snapshot, thus we do
+    // not need to record the removed child in any snapshot.
+    // For a directory that was moved/renamed, note that if the directory is
+    // in any of the previous snapshots, we will have created a reference node
+    // for the directory during the rename, and isInLatestSnapshot will return
+    // true in that scenario (if all previous snapshots have been deleted,
+    // isInLatestSnapshot still returns false). Thus if isInLatestSnapshot
+    // returns false, the directory node cannot be in any snapshot (neither in
+    // the current tree nor in the previous src tree), and we do not need to
+    // record the removed child in any snapshot.
+    if (isInLatestSnapshot(latest)) {
       diff = diffs.checkAndAddLatestSnapshotDiff(latest, this).diff;
       undoInfo = diff.delete(child);
     }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index c61846287d8..264e0d46fb2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -52,6 +52,7 @@ import org.apache.hadoop.hdfs.server.namenode.FSDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeDirectory;
+import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeMap;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference;
 import org.apache.hadoop.hdfs.server.namenode.INodeReference.WithCount;
@@ -1674,4 +1675,51 @@ public class TestRenameWithSnapshots {
 
     restartClusterAndCheckImage();
   }
+
+  /**
+   * This test demonstrates that
+   * {@link INodeDirectoryWithSnapshot#removeChild(INode, Snapshot, INodeMap)}
+   * and
+   * {@link INodeDirectoryWithSnapshot#addChild(INode, boolean, Snapshot, INodeMap)}
+   * should use {@link INode#isInLatestSnapshot(Snapshot)} to check if the
+   * added/removed child should be recorded in snapshots.
+   */
+  @Test
+  public void testRenameAndDeleteSnapshot_5() throws Exception {
+    final Path dir1 = new Path("/dir1");
+    final Path dir2 = new Path("/dir2");
+    final Path dir3 = new Path("/dir3");
+    hdfs.mkdirs(dir1);
+    hdfs.mkdirs(dir2);
+    hdfs.mkdirs(dir3);
+
+    final Path foo = new Path(dir1, "foo");
+    hdfs.mkdirs(foo);
+    SnapshotTestHelper.createSnapshot(hdfs, dir1, "s1");
+    final Path bar = new Path(foo, "bar");
+    // create file bar, and foo will become an INodeDirectoryWithSnapshot
+    DFSTestUtil.createFile(hdfs, bar, BLOCKSIZE, REPL, SEED);
+    // delete snapshot s1; now foo is not in any snapshot
+    hdfs.deleteSnapshot(dir1, "s1");
+
+    SnapshotTestHelper.createSnapshot(hdfs, dir2, "s2");
+    // rename /dir1/foo to /dir2/foo
+    final Path foo2 = new Path(dir2, foo.getName());
+    hdfs.rename(foo, foo2);
+    // rename /dir2/foo/bar to /dir3/bar
+    final Path bar2 = new Path(dir2, "foo/bar");
+    final Path bar3 = new Path(dir3, "bar");
+    hdfs.rename(bar2, bar3);
+
+    // delete /dir2/foo. Since it is not in any snapshot, we will call its
+    // destroy function. If we do not use isInLatestSnapshot in the removeChild
+    // and addChild methods of INodeDirectoryWithSnapshot, the file bar will be
+    // stored in the deleted list of foo, and will be destroyed.
+    hdfs.delete(foo2, true);
+
+    // check that /dir3/bar still exists
+    assertTrue(hdfs.exists(bar3));
+    INodeFile barNode = (INodeFile) fsdir.getINode4Write(bar3.toString());
+    assertSame(fsdir.getINode4Write(dir3.toString()), barNode.getParent());
+  }
 }
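For readers skimming the patch, the guard it introduces can be illustrated outside of HDFS. The sketch below is a toy model, not HDFS code: ToyDir, memberOfSnapshots, deletedList, and GuardSketch are invented names that only mimic the shape of INodeDirectoryWithSnapshot.removeChild. It shows why checking merely "latest != null" can park a removed child in a snapshot's deleted list even though the directory itself is no longer captured by any snapshot.

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Toy model (hypothetical names, not HDFS classes) of the guard changed in
// this patch: only record a removed child in the latest snapshot's diff when
// the directory itself is captured by that snapshot.
class ToyDir {
  final String name;
  final List<String> children = new ArrayList<>();
  // snapshots that actually capture this directory
  final Set<String> memberOfSnapshots = new HashSet<>();
  // children recorded as "deleted since the latest snapshot"
  final List<String> deletedList = new ArrayList<>();

  ToyDir(String name) { this.name = name; }

  boolean isInLatestSnapshot(String latest) {
    return latest != null && memberOfSnapshots.contains(latest);
  }

  // Mirrors the shape of removeChild: record the removal in the diff only if
  // this directory is in the latest snapshot; otherwise remove it outright.
  void removeChild(String child, String latest) {
    // the patch's guard; a bare "latest != null" check would wrongly record
    // the removal whenever any snapshot exists somewhere in the tree
    if (isInLatestSnapshot(latest)) {
      deletedList.add(child);  // child must be kept alive for the snapshot
    }
    children.remove(child);
  }
}

public class GuardSketch {
  public static void main(String[] args) {
    ToyDir foo = new ToyDir("foo");
    foo.children.add("bar");
    // foo was once snapshotted, but that snapshot has been deleted, so the
    // set of snapshots capturing foo is empty while a "latest" snapshot name
    // (taken elsewhere in the tree) is still being passed around.
    String latest = "s2";

    foo.removeChild("bar", latest);
    // With the isInLatestSnapshot guard, "bar" is not parked in the deleted
    // list, so destroying foo later will not drag "bar" down with it.
    System.out.println("deletedList = " + foo.deletedList);  // prints []
  }
}

This is the same shape as the scenario exercised by testRenameAndDeleteSnapshot_5 above: after s1 is deleted, foo is in no snapshot, so removing bar (already renamed to /dir3/bar) must not be recorded in any diff, and deleting /dir2/foo must not destroy the renamed file.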