diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java
index 79da1078ef2..ed687c14a61 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/SnapshotCommands.java
@@ -23,6 +23,7 @@ import java.util.LinkedList;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsNotDirectoryException;
 
 import com.google.common.base.Preconditions;
@@ -83,7 +84,8 @@ class SnapshotCommands extends FsCommand {
       }
       assert(items.size() == 1);
       PathData sroot = items.getFirst();
-      sroot.fs.createSnapshot(sroot.path, snapshotName);
+      Path snapshotPath = sroot.fs.createSnapshot(sroot.path, snapshotName);
+      out.println("Created snapshot " + snapshotPath);
     }
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
index 0507159eb78..52e635d6b7e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.HDFS-2802.txt
@@ -255,3 +255,6 @@ Branch-2802 Snapshot (Unreleased)
   (szetszwo)
 
   HDFS-4706. Do not replace root inode for disallowSnapshot. (szetszwo)
+
+  HDFS-4717. Change the path parameter type of the snapshot methods in
+  HdfsAdmin from String to Path. (szetszwo)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
index eedbd0548f8..72663680dc9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
@@ -921,14 +921,14 @@ public class DistributedFileSystem extends FileSystem {
     return setSafeMode(SafeModeAction.SAFEMODE_GET, true);
   }
 
-  /** @see HdfsAdmin#allowSnapshot(String) */
-  public void allowSnapshot(String path) throws IOException {
-    dfs.allowSnapshot(path);
+  /** @see HdfsAdmin#allowSnapshot(Path) */
+  public void allowSnapshot(Path path) throws IOException {
+    dfs.allowSnapshot(getPathName(path));
   }
 
-  /** @see HdfsAdmin#disallowSnapshot(String) */
-  public void disallowSnapshot(String path) throws IOException {
-    dfs.disallowSnapshot(path);
+  /** @see HdfsAdmin#disallowSnapshot(Path) */
+  public void disallowSnapshot(Path path) throws IOException {
+    dfs.disallowSnapshot(getPathName(path));
   }
 
   @Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
index 0fe9e96764d..b80adcc339b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java
@@ -110,7 +110,7 @@ public class HdfsAdmin {
    * Allow snapshot on a directory.
   * @param the path of the directory where snapshots will be taken
   */
-  public void allowSnapshot(String path) throws IOException {
+  public void allowSnapshot(Path path) throws IOException {
     dfs.allowSnapshot(path);
   }
 
@@ -118,7 +118,7 @@ public class HdfsAdmin {
   * Disallow snapshot on a directory.
   * @param path of the snapshottable directory.
   */
-  public void disallowSnapshot(String path) throws IOException {
+  public void disallowSnapshot(Path path) throws IOException {
     dfs.disallowSnapshot(path);
   }
 }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index e9363536f86..38465679dfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -414,7 +414,7 @@ public class DFSAdmin extends FsShell {
    */
   public void allowSnapshot(String[] argv) throws IOException {
     DistributedFileSystem dfs = getDFS();
-    dfs.allowSnapshot(argv[1]);
+    dfs.allowSnapshot(new Path(argv[1]));
     System.out.println("Allowing snaphot on " + argv[1] + " succeeded");
   }
 
@@ -426,7 +426,7 @@ public class DFSAdmin extends FsShell {
    */
   public void disallowSnapshot(String[] argv) throws IOException {
     DistributedFileSystem dfs = getDFS();
-    dfs.disallowSnapshot(argv[1]);
+    dfs.disallowSnapshot(new Path(argv[1]));
     System.out.println("Disallowing snaphot on " + argv[1] + " succeeded");
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index 523dbc95bee..8969f6544e7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -156,12 +156,12 @@ public class OfflineEditsViewerHelper {
     Path pathDirectoryMkdir = new Path("/directory_mkdir");
     dfs.mkdirs(pathDirectoryMkdir);
     // OP_ALLOW_SNAPSHOT 29
-    dfs.allowSnapshot(pathDirectoryMkdir.toString());
+    dfs.allowSnapshot(pathDirectoryMkdir);
     // OP_DISALLOW_SNAPSHOT 30
-    dfs.disallowSnapshot(pathDirectoryMkdir.toString());
+    dfs.disallowSnapshot(pathDirectoryMkdir);
     // OP_CREATE_SNAPSHOT 26
     String ssName = "snapshot1";
-    dfs.allowSnapshot(pathDirectoryMkdir.toString());
+    dfs.allowSnapshot(pathDirectoryMkdir);
     dfs.createSnapshot(pathDirectoryMkdir, ssName);
     // OP_RENAME_SNAPSHOT 28
     String ssNewName = "snapshot2";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
index 9d9abb71619..b5b969f9e53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSImageWithSnapshot.java
@@ -282,7 +282,7 @@ public class TestFSImageWithSnapshot {
     DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
 
     // 1. create snapshot s0
-    hdfs.allowSnapshot(dir.toString());
+    hdfs.allowSnapshot(dir);
     hdfs.createSnapshot(dir, "s0");
 
     // 2. create snapshot s1 before appending sub1file1 finishes
@@ -337,7 +337,7 @@ public class TestFSImageWithSnapshot {
     DFSTestUtil.createFile(hdfs, sub1file1, BLOCKSIZE, REPLICATION, seed);
     DFSTestUtil.createFile(hdfs, sub1file2, BLOCKSIZE, REPLICATION, seed);
 
-    hdfs.allowSnapshot(dir.toString());
+    hdfs.allowSnapshot(dir);
     hdfs.createSnapshot(dir, "s0");
 
     HdfsDataOutputStream out = appendFileWithoutClosing(sub1file1, BLOCKSIZE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
index 5ce288c4116..612efff1250 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSnapshotPathINodes.java
@@ -81,23 +81,24 @@ public class TestSnapshotPathINodes {
   /** Test allow-snapshot operation. */
   @Test (timeout=15000)
   public void testAllowSnapshot() throws Exception {
-    final String path = sub1.toString();
-    final INode before = fsdir.getINode(path);
+    final String pathStr = sub1.toString();
+    final INode before = fsdir.getINode(pathStr);
 
     // Before a directory is snapshottable
     Assert.assertTrue(before instanceof INodeDirectory);
     Assert.assertFalse(before instanceof INodeDirectorySnapshottable);
 
     // After a directory is snapshottable
+    final Path path = new Path(pathStr);
     hdfs.allowSnapshot(path);
     {
-      final INode after = fsdir.getINode(path);
+      final INode after = fsdir.getINode(pathStr);
       Assert.assertTrue(after instanceof INodeDirectorySnapshottable);
     }
 
     hdfs.disallowSnapshot(path);
     {
-      final INode after = fsdir.getINode(path);
+      final INode after = fsdir.getINode(pathStr);
       Assert.assertTrue(after instanceof INodeDirectory);
       Assert.assertFalse(after instanceof INodeDirectorySnapshottable);
     }
@@ -181,7 +182,7 @@ public class TestSnapshotPathINodes {
   public void testSnapshotPathINodes() throws Exception {
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     hdfs.createSnapshot(sub1, "s1");
     // The path when accessing the snapshot file of file1 is
     // /TestSnapshot/sub1/.snapshot/s1/file1
@@ -247,7 +248,7 @@ public class TestSnapshotPathINodes {
   public void testSnapshotPathINodesAfterDeletion() throws Exception {
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     hdfs.createSnapshot(sub1, "s2");
 
     // Delete the original file /TestSnapshot/sub1/file1
@@ -306,7 +307,7 @@ public class TestSnapshotPathINodes {
   public void testSnapshotPathINodesWithAddedFile() throws Exception {
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
    hdfs.createSnapshot(sub1, "s4");
 
     // Add a new file /TestSnapshot/sub1/file3
@@ -379,7 +380,7 @@ public class TestSnapshotPathINodes {
 
     // Create a snapshot for the dir, and check the inodes for the path
     // pointing to a snapshot file
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     hdfs.createSnapshot(sub1, "s3");
 
     // Modify file1
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
index fefe5fafcd3..146065d39bd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/SnapshotTestHelper.java
@@ -130,7 +130,7 @@ public class SnapshotTestHelper {
       Path snapshotRoot, String snapshotName) throws Exception {
     LOG.info("createSnapshot " + snapshotName + " for " + snapshotRoot);
     assertTrue(hdfs.exists(snapshotRoot));
-    hdfs.allowSnapshot(snapshotRoot.toString());
+    hdfs.allowSnapshot(snapshotRoot);
     hdfs.createSnapshot(snapshotRoot, snapshotName);
     // set quota to a large value for testing counts
     hdfs.setQuota(snapshotRoot, Long.MAX_VALUE-1, Long.MAX_VALUE-1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
index 3e3b3d99367..58415a73b37 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestNestedSnapshots.java
@@ -88,14 +88,14 @@ public class TestNestedSnapshots {
 
     final String s1name = "foo-s1";
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, s1name);
-    hdfs.allowSnapshot(foo.toString());
+    hdfs.allowSnapshot(foo);
     print("allow snapshot " + foo);
     hdfs.createSnapshot(foo, s1name);
     print("create snapshot " + s1name);
 
     final String s2name = "bar-s2";
     final Path s2path = SnapshotTestHelper.getSnapshotRoot(bar, s2name);
-    hdfs.allowSnapshot(bar.toString());
+    hdfs.allowSnapshot(bar);
     print("allow snapshot " + bar);
     hdfs.createSnapshot(bar, s2name);
     print("create snapshot " + s2name);
@@ -109,13 +109,13 @@ public class TestNestedSnapshots {
 
     final String rootStr = "/";
     final Path rootPath = new Path(rootStr);
-    hdfs.allowSnapshot(rootStr);
+    hdfs.allowSnapshot(rootPath);
     print("allow snapshot " + rootStr);
     final Path rootSnapshot = hdfs.createSnapshot(rootPath);
     print("create snapshot " + rootSnapshot);
     hdfs.deleteSnapshot(rootPath, rootSnapshot.getName());
     print("delete snapshot " + rootSnapshot);
-    hdfs.disallowSnapshot(rootStr);
+    hdfs.disallowSnapshot(rootPath);
     print("disallow snapshot " + rootStr);
   }
 
@@ -143,7 +143,7 @@ public class TestNestedSnapshots {
     final String dirStr = "/testSnapshotLimit/dir";
     final Path dir = new Path(dirStr);
     hdfs.mkdirs(dir, new FsPermission((short)0777));
-    hdfs.allowSnapshot(dirStr);
+    hdfs.allowSnapshot(dir);
 
     int s = 0;
     for(; s < SNAPSHOT_LIMIT; s++) {
@@ -180,7 +180,7 @@ public class TestNestedSnapshots {
     final String dirStr = "/testSnapshotWithQuota/dir";
     final Path dir = new Path(dirStr);
     hdfs.mkdirs(dir, new FsPermission((short)0777));
-    hdfs.allowSnapshot(dirStr);
+    hdfs.allowSnapshot(dir);
 
     // set namespace quota
     final int NS_QUOTA = 6;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
index 1969ea052b3..2e0020c908c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestRenameWithSnapshots.java
@@ -116,7 +116,7 @@ public class TestRenameWithSnapshots {
     final String abcStr = dirStr + "/abc";
     final Path abc = new Path(abcStr);
     hdfs.mkdirs(abc, new FsPermission((short)0777));
-    hdfs.allowSnapshot(abcStr);
+    hdfs.allowSnapshot(abc);
 
     final Path foo = new Path(abc, "foo");
     DFSTestUtil.createFile(hdfs, foo, BLOCKSIZE, REPL, SEED);
@@ -175,7 +175,7 @@ public class TestRenameWithSnapshots {
   @Test (timeout=60000)
   public void testRenameFileNotInSnapshot() throws Exception {
     hdfs.mkdirs(sub1);
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     hdfs.createSnapshot(sub1, snap1);
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
     hdfs.rename(file1, file2);
@@ -195,7 +195,7 @@ public class TestRenameWithSnapshots {
   @Test (timeout=60000)
   public void testRenameFileInSnapshot() throws Exception {
     hdfs.mkdirs(sub1);
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
     hdfs.createSnapshot(sub1, snap1);
     hdfs.rename(file1, file2);
@@ -213,7 +213,7 @@ public class TestRenameWithSnapshots {
   @Test (timeout=60000)
   public void testRenameTwiceInSnapshot() throws Exception {
     hdfs.mkdirs(sub1);
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPL, SEED);
     hdfs.createSnapshot(sub1, snap1);
     hdfs.rename(file1, file2);
@@ -1062,7 +1062,7 @@ public class TestRenameWithSnapshots {
     hdfs.mkdirs(foo);
     hdfs.mkdirs(bar);
 
-    hdfs.allowSnapshot(foo.toString());
+    hdfs.allowSnapshot(foo);
     SnapshotTestHelper.createSnapshot(hdfs, bar, snap1);
     assertEquals(2, fsn.getSnapshottableDirListing().length);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
index e4827cc9f01..dce36039ee9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshot.java
@@ -277,7 +277,7 @@ public class TestSnapshot {
 
     FileStatus oldStatus = hdfs.getFileStatus(sub);
 
-    hdfs.allowSnapshot(dir.toString());
+    hdfs.allowSnapshot(dir);
     hdfs.createSnapshot(dir, "s1");
     hdfs.setTimes(sub, 100L, 100L);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
index 7d18a736e40..90370e9d993 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDeletion.java
@@ -107,7 +107,7 @@ public class TestSnapshotDeletion {
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
 
     // Allow snapshot for sub1, and create snapshot for it
-    hdfs.allowSnapshot(sub.toString());
+    hdfs.allowSnapshot(sub);
     hdfs.createSnapshot(sub, "s1");
 
     // Deleting a snapshottable dir with snapshots should fail
@@ -135,7 +135,7 @@ public class TestSnapshotDeletion {
     DFSTestUtil.createFile(hdfs, subfile2, BLOCKSIZE, REPLICATION, seed);
 
     // Allow snapshot for subsub1, and create snapshot for it
-    hdfs.allowSnapshot(subsub.toString());
+    hdfs.allowSnapshot(subsub);
     hdfs.createSnapshot(subsub, "s1");
 
     // Deleting dir while its descedant subsub1 having snapshots should fail
@@ -356,7 +356,7 @@ public class TestSnapshotDeletion {
     }
 
     // make sub snapshottable
-    hdfs.allowSnapshot(sub.toString());
+    hdfs.allowSnapshot(sub);
     try {
       hdfs.deleteSnapshot(sub, snapshotName);
       fail("SnapshotException expected: snapshot " + snapshotName
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
index 75f803e40ec..66417b02e31 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotDiffReport.java
@@ -100,7 +100,7 @@ public class TestSnapshotDiffReport {
     DFSTestUtil.createFile(hdfs, file13, BLOCKSIZE, REPLICATION_1, seed);
     // create snapshot
     for (Path snapshotDir : snapshotDirs) {
-      hdfs.allowSnapshot(snapshotDir.toString());
+      hdfs.allowSnapshot(snapshotDir);
       hdfs.createSnapshot(snapshotDir, genSnapshotName(snapshotDir));
     }
   }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
index 0afe8e28a20..2639d445ac6 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotListing.java
@@ -87,7 +87,7 @@ public class TestSnapshotListing {
     }
 
     // list before creating snapshots
-    hdfs.allowSnapshot(dir.toString());
+    hdfs.allowSnapshot(dir);
     stats = hdfs.listStatus(snapshotsPath);
     assertEquals(0, stats.length);
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java
index e3ee87a50b2..8d2fa5a2312 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotMetrics.java
@@ -83,34 +83,34 @@ public class TestSnapshotMetrics {
     assertCounter("DisallowSnapshotOps", 0L, getMetrics(NN_METRICS));
 
     // Allow snapshots for directories, and check the metrics
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     assertGauge("SnapshottableDirectories", 1, getMetrics(NS_METRICS));
     assertCounter("AllowSnapshotOps", 1L, getMetrics(NN_METRICS));
 
     Path sub2 = new Path(dir, "sub2");
     Path file = new Path(sub2, "file");
     DFSTestUtil.createFile(hdfs, file, 1024, REPLICATION, seed);
-    hdfs.allowSnapshot(sub2.toString());
+    hdfs.allowSnapshot(sub2);
     assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
     assertCounter("AllowSnapshotOps", 2L, getMetrics(NN_METRICS));
 
     Path subsub1 = new Path(sub1, "sub1sub1");
     Path subfile = new Path(subsub1, "file");
     DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
-    hdfs.allowSnapshot(subsub1.toString());
+    hdfs.allowSnapshot(subsub1);
     assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
     assertCounter("AllowSnapshotOps", 3L, getMetrics(NN_METRICS));
 
     // Set an already snapshottable directory to snapshottable, should not
     // change the metrics
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     assertGauge("SnapshottableDirectories", 3, getMetrics(NS_METRICS));
     // But the number of allowSnapshot operations still increases
     assertCounter("AllowSnapshotOps", 4L, getMetrics(NN_METRICS));
 
     // Disallow the snapshot for snapshottable directories, then check the
     // metrics again
-    hdfs.disallowSnapshot(sub1.toString());
+    hdfs.disallowSnapshot(sub1);
     assertGauge("SnapshottableDirectories", 2, getMetrics(NS_METRICS));
     assertCounter("DisallowSnapshotOps", 1L, getMetrics(NN_METRICS));
 
@@ -142,7 +142,7 @@ public class TestSnapshotMetrics {
     assertCounter("CreateSnapshotOps", 1L, getMetrics(NN_METRICS));
 
     // Create snapshot for sub1
-    hdfs.allowSnapshot(sub1.toString());
+    hdfs.allowSnapshot(sub1);
     hdfs.createSnapshot(sub1, "s1");
     assertGauge("Snapshots", 1, getMetrics(NS_METRICS));
     assertCounter("CreateSnapshotOps", 2L, getMetrics(NN_METRICS));
@@ -156,7 +156,7 @@ public class TestSnapshotMetrics {
     Path subsub1 = new Path(sub1, "sub1sub1");
     Path subfile = new Path(subsub1, "file");
     DFSTestUtil.createFile(hdfs, subfile, 1024, REPLICATION, seed);
-    hdfs.allowSnapshot(subsub1.toString());
+    hdfs.allowSnapshot(subsub1);
     hdfs.createSnapshot(subsub1, "s11");
     assertGauge("Snapshots", 3, getMetrics(NS_METRICS));
     assertCounter("CreateSnapshotOps", 4L, getMetrics(NN_METRICS));
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
index bc2f1a366a4..6fd30ee471f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshottableDirListing.java
@@ -80,7 +80,7 @@ public class TestSnapshottableDirListing {
     assertNull(dirs);
 
     // Make dir1 as snapshottable
-    hdfs.allowSnapshot(dir1.toString());
+    hdfs.allowSnapshot(dir1);
     dirs = hdfs.getSnapshottableDirListing();
     assertEquals(1, dirs.length);
     assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
@@ -89,7 +89,7 @@ public class TestSnapshottableDirListing {
     assertEquals(0, dirs[0].getSnapshotNumber());
 
     // Make dir2 as snapshottable
-    hdfs.allowSnapshot(dir2.toString());
+    hdfs.allowSnapshot(dir2);
     dirs = hdfs.getSnapshottableDirListing();
     assertEquals(2, dirs.length);
     assertEquals(dir1.getName(), dirs[0].getDirStatus().getLocalName());
@@ -110,7 +110,7 @@ public class TestSnapshottableDirListing {
     assertEquals(dir1, dirs[0].getFullPath());
 
     // Make dir2 snapshottable again
-    hdfs.allowSnapshot(dir2.toString());
+    hdfs.allowSnapshot(dir2);
     // Create a snapshot for dir2
     hdfs.createSnapshot(dir2, "s1");
     hdfs.createSnapshot(dir2, "s2");
@@ -127,8 +127,8 @@ public class TestSnapshottableDirListing {
     DFSTestUtil.createFile(hdfs, file1, BLOCKSIZE, REPLICATION, seed);
     DFSTestUtil.createFile(hdfs, file2, BLOCKSIZE, REPLICATION, seed);
     // Make sub1 and sub2 snapshottable
-    hdfs.allowSnapshot(sub1.toString());
-    hdfs.allowSnapshot(sub2.toString());
+    hdfs.allowSnapshot(sub1);
+    hdfs.allowSnapshot(sub2);
     dirs = hdfs.getSnapshottableDirListing();
     assertEquals(4, dirs.length);
     assertEquals(dir1, dirs[0].getFullPath());
@@ -137,7 +137,7 @@ public class TestSnapshottableDirListing {
     assertEquals(sub2, dirs[3].getFullPath());
 
     // reset sub1
-    hdfs.disallowSnapshot(sub1.toString());
+    hdfs.disallowSnapshot(sub1);
     dirs = hdfs.getSnapshottableDirListing();
     assertEquals(3, dirs.length);
     assertEquals(dir1, dirs[0].getFullPath());
@@ -159,8 +159,8 @@ public class TestSnapshottableDirListing {
   @Test (timeout=60000)
   public void testListWithDifferentUser() throws Exception {
     // first make dir1 and dir2 snapshottable
-    hdfs.allowSnapshot(dir1.toString());
-    hdfs.allowSnapshot(dir2.toString());
+    hdfs.allowSnapshot(dir1);
+    hdfs.allowSnapshot(dir2);
     hdfs.setPermission(root, FsPermission.valueOf("-rwxrwxrwx"));
 
     // create two dirs and make them snapshottable under the name of user1
@@ -172,8 +172,8 @@ public class TestSnapshottableDirListing {
     Path dir2_user1 = new Path("/dir2_user1");
     fs1.mkdirs(dir1_user1);
     fs1.mkdirs(dir2_user1);
-    fs1.allowSnapshot(dir1_user1.toString());
-    fs1.allowSnapshot(dir2_user1.toString());
+    fs1.allowSnapshot(dir1_user1);
+    fs1.allowSnapshot(dir2_user1);
 
     // user2
     UserGroupInformation ugi2 = UserGroupInformation.createUserForTesting(
@@ -184,8 +184,8 @@ public class TestSnapshottableDirListing {
     Path subdir_user2 = new Path(dir_user2, "subdir");
     fs2.mkdirs(dir_user2);
     fs2.mkdirs(subdir_user2);
-    fs2.allowSnapshot(dir_user2.toString());
-    fs2.allowSnapshot(subdir_user2.toString());
+    fs2.allowSnapshot(dir_user2);
+    fs2.allowSnapshot(subdir_user2);
 
     // super user
     String supergroup = conf.get(DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
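
For context only, and not part of the patch: a minimal sketch of how a client might call the Path-based snapshot admin API after this change. The cluster URI ("hdfs://localhost:8020") and the directory name are illustrative assumptions, not values taken from the patch.

import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

public class AllowSnapshotExample {
  public static void main(String[] args) throws IOException {
    // Assumed NameNode URI; adjust for a real deployment.
    Configuration conf = new Configuration();
    HdfsAdmin admin = new HdfsAdmin(URI.create("hdfs://localhost:8020"), conf);

    // After HDFS-4717, allowSnapshot/disallowSnapshot take a Path, not a String.
    Path dir = new Path("/data/projects");
    admin.allowSnapshot(dir);      // mark the directory as snapshottable
    admin.disallowSnapshot(dir);   // revert it
  }
}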