diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index bd1a07e5064..ed0237e62c6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -87,3 +87,5 @@ Branch-2802 Snapshot (Unreleased)
   HDFS-4317. Change INode and its subclasses to support HDFS-4103. (szetszwo)
 
   HDFS-4103. Support O(1) snapshot creation. (szetszwo)
+
+  HDFS-4330. Support snapshots up to the snapshot limit. (szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 70e38f3208b..5f34fbfd5a1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -2022,7 +2022,7 @@ public class FSDirectory implements Closeable {
       }
     } else {
       // a non-quota directory; so replace it with a directory with quota
-      return dirNode.replaceSelf4Quota(latest, oldNsQuota, oldDsQuota);
+      return dirNode.replaceSelf4Quota(latest, nsQuota, dsQuota);
     }
     return (oldNsQuota != nsQuota || oldDsQuota != dsQuota) ? dirNode : null;
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
index c03cf7ed6c9..5503182c7cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/snapshot/INodeDirectoryWithSnapshot.java
@@ -31,7 +31,11 @@ import org.apache.hadoop.hdfs.util.ReadOnlyList;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 
-/** The directory with snapshots. */
+/**
+ * The directory with snapshots. It maintains a list of snapshot diffs for
+ * storing snapshot data. When there are modifications to the directory, the old
+ * data is stored in the latest snapshot, if there is any.
+ */
 public class INodeDirectoryWithSnapshot extends INodeDirectoryWithQuota {
   /**
    * The difference between the current state and a previous snapshot
@@ -351,8 +355,8 @@
 
     /** Compare diffs with snapshot ID. */
     @Override
-    public int compareTo(final Snapshot that_snapshot) {
-      return Snapshot.ID_COMPARATOR.compare(this.snapshot, that_snapshot);
+    public int compareTo(final Snapshot that) {
+      return Snapshot.ID_COMPARATOR.compare(this.snapshot, that);
     }
 
     /** Is the inode the root of the snapshot? */
@@ -381,9 +385,13 @@
 
     private INodeDirectory getSnapshotINode() {
       // get from this diff, then the posterior diff and then the current inode
-      return snapshotINode != null? snapshotINode
-          : posteriorDiff != null? posteriorDiff.getSnapshotINode()
-          : INodeDirectoryWithSnapshot.this;
+      for(SnapshotDiff d = this; ; d = d.posteriorDiff) {
+        if (d.snapshotINode != null) {
+          return d.snapshotINode;
+        } else if (d.posteriorDiff == null) {
+          return INodeDirectoryWithSnapshot.this;
+        }
+      }
     }
 
     /**
@@ -429,17 +437,18 @@
 
     /** @return the child with the given name. */
     INode getChild(byte[] name, boolean checkPosterior) {
-      final INode[] array = diff.accessPrevious(name);
-      if (array != null) {
-        // this diff is able to find it
-        return array[0];
-      } else if (!checkPosterior) {
-        // Since checkPosterior is false, return null, i.e. not found.
-        return null;
-      } else {
-        // return the posterior INode.
-        return posteriorDiff != null? posteriorDiff.getChild(name, true)
-            : INodeDirectoryWithSnapshot.this.getChild(name, null);
+      for(SnapshotDiff d = this; ; d = d.posteriorDiff) {
+        final INode[] array = d.diff.accessPrevious(name);
+        if (array != null) {
+          // the diff is able to find it
+          return array[0];
+        } else if (!checkPosterior) {
+          // Since checkPosterior is false, return null, i.e. not found.
+          return null;
+        } else if (d.posteriorDiff == null) {
+          // no more posterior diff, get from current inode.
+          return INodeDirectoryWithSnapshot.this.getChild(name, null);
+        }
       }
     }
 
@@ -554,6 +563,7 @@
       return save2Snapshot(latest, null);
     }
 
+    /** Save the snapshot copy to the latest snapshot. */
    public Pair save2Snapshot(Snapshot latest,
        INodeDirectory snapshotCopy) {
      return latest == null? null
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
index 5611f1a148a..ba85e06c0b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSDirectory.java
@@ -51,7 +51,6 @@ public class TestFSDirectory {
   private final Path sub11 = new Path(sub1, "sub11");
   private final Path file3 = new Path(sub11, "file3");
 
-  private final Path file4 = new Path(sub1, "z_file4");
   private final Path file5 = new Path(sub1, "z_file5");
 
   private final Path sub2 = new Path(dir, "sub2");
@@ -106,29 +105,13 @@
 
     for(; (line = in.readLine()) != null; ) {
       line = line.trim();
-      if (!line.contains("snapshot")) {
-        Assert.assertTrue(line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
+      if (!line.isEmpty() && !line.contains("snapshot")) {
+        Assert.assertTrue("line=" + line,
+            line.startsWith(INodeDirectory.DUMPTREE_LAST_ITEM)
             || line.startsWith(INodeDirectory.DUMPTREE_EXCEPT_LAST_ITEM));
         checkClassName(line);
       }
     }
-
-    LOG.info("Create a new file " + file4);
-    DFSTestUtil.createFile(hdfs, file4, 1024, REPLICATION, seed);
-
-    final StringBuffer b2 = root.dumpTreeRecursively();
-    System.out.println("b2=" + b2);
-
-    int i = 0;
-    int j = b1.length() - 1;
-    for(; b1.charAt(i) == b2.charAt(i); i++);
-    int k = b2.length() - 1;
-    for(; b1.charAt(j) == b2.charAt(k); j--, k--);
-    final String diff = b2.substring(i, k + 1);
-    System.out.println("i=" + i + ", j=" + j + ", k=" + k);
-    System.out.println("diff=" + diff);
-    Assert.assertTrue(i > j);
-    Assert.assertTrue(diff.contains(file4.getName()));
   }
 
   @Test
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
index 09b0a422947..4f41511e88a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
@@ -26,6 +26,8 @@ import java.util.HashMap;
 import java.util.Map;
 import java.util.Random;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -38,6 +40,8 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
  * Helper for writing snapshot related tests
  */
 public class SnapshotTestHelper {
+  public static final Log LOG = LogFactory.getLog(SnapshotTestHelper.class);
+
   private SnapshotTestHelper() {
     // Cannot be instantiated
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
index 449c77f8493..173485c32f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.snapshot;
 
+import static org.apache.hadoop.hdfs.server.namenode.snapshot.INodeDirectorySnapshottable.SNAPSHOT_LIMIT;
+
 import java.io.IOException;
+import java.util.Random;
 
 import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -56,6 +60,8 @@ public class TestNestedSnapshots {
   }
 
   private static final long SEED = 0;
+  private static Random RANDOM = new Random(SEED);
+
   private static final short REPLICATION = 3;
   private static final long BLOCKSIZE = 1024;
 
@@ -89,7 +95,7 @@
    */
   @Test
   public void testNestedSnapshots() throws Exception {
-    final Path foo = new Path("/test/foo");
+    final Path foo = new Path("/testNestedSnapshots/foo");
     final Path bar = new Path(foo, "bar");
     final Path file1 = new Path(bar, "file1");
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, SEED);
@@ -117,8 +123,8 @@
     assertFile(s1path, s2path, file2, true, false, false);
   }
 
-  private static void print(String mess) throws UnresolvedLinkException {
-    System.out.println("XXX " + mess);
+  private static void print(String message) throws UnresolvedLinkException {
+    System.out.println("XXX " + message);
     SnapshotTestHelper.dumpTreeRecursively(fsn.getFSDirectory().getINode("/"));
   }
 
@@ -135,4 +141,42 @@
       Assert.assertEquals("Failed on " + paths[i], expected[i], computed);
     }
   }
+
+  @Test
+  public void testSnapshotLimit() throws Exception {
+    final int step = 1000;
+    final String dirStr = "/testSnapshotLimit/dir";
+    final Path dir = new Path(dirStr);
+    hdfs.mkdirs(dir, new FsPermission((short)0777));
+    hdfs.allowSnapshot(dirStr);
+
+    int s = 0;
+    for(; s < SNAPSHOT_LIMIT; s++) {
+      final String snapshotName = "s" + s;
+      hdfs.createSnapshot(snapshotName, dirStr);
+
+      //create a file occasionally
+      if (s % step == 0) {
+        final Path file = new Path(dirStr, "f" + s);
+        DFSTestUtil.createFile(hdfs, file, BLOCKSIZE, REPLICATION, SEED);
+      }
+    }
+
+    try {
+      hdfs.createSnapshot("s" + s, dirStr);
+      Assert.fail("Expected to fail to create snapshot, but didn't.");
+    } catch(IOException ioe) {
+      SnapshotTestHelper.LOG.info("The exception is expected.", ioe);
+    }
+
+    for(int f = 0; f < SNAPSHOT_LIMIT; f += step) {
+      final String file = "f" + f;
+      s = RANDOM.nextInt(step);
+      for(; s < SNAPSHOT_LIMIT; s += RANDOM.nextInt(step)) {
+        final Path p = SnapshotTestHelper.getSnapshotPath(dir, "s" + s, file);
+        //the file #f exists in snapshot #s iff s > f.
+        Assert.assertEquals(s > f, hdfs.exists(p));
+      }
+    }
+  }
 }
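Note on the getSnapshotINode() and getChild(..) rewrites above: both replace recursion along the posteriorDiff chain with a loop, so a lookup stays on a single stack frame even after a directory accumulates SNAPSHOT_LIMIT diffs, which is exactly the case exercised by testSnapshotLimit. Below is a minimal standalone sketch of the same traversal pattern; Diff, savedCopy, posterior and CURRENT are illustrative stand-ins, not the actual SnapshotDiff/INodeDirectory types from the patch.

/**
 * Sketch only: iterative lookup along a chain of snapshot diffs,
 * oldest to newest, falling through to the current state at the end.
 */
public class DiffChainSketch {
  static class Diff {
    final String savedCopy;  // copy recorded for this snapshot; null if none
    Diff posterior;          // next newer diff; null for the latest diff

    Diff(String savedCopy) {
      this.savedCopy = savedCopy;
    }
  }

  static final String CURRENT = "current state";

  /** Walk toward newer diffs without recursion, as in the patch. */
  static String lookup(Diff start) {
    for (Diff d = start; ; d = d.posterior) {
      if (d.savedCopy != null) {
        return d.savedCopy;   // this diff recorded a copy at snapshot time
      } else if (d.posterior == null) {
        return CURRENT;       // no newer diff; fall through to current state
      }
    }
  }

  public static void main(String[] args) {
    final Diff s0 = new Diff(null);       // nothing recorded at s0
    final Diff s1 = new Diff("copy@s1");  // a copy was saved at s1
    final Diff s2 = new Diff(null);       // latest diff, nothing recorded
    s0.posterior = s1;
    s1.posterior = s2;

    System.out.println(lookup(s0));  // copy@s1: found in a posterior diff
    System.out.println(lookup(s2));  // current state: end of the chain
  }
}

Each diff prefers its own saved copy and only consults newer diffs when it has none, matching the semantics in the new class javadoc: on modification, the old data is stored in the latest snapshot, if there is any.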