diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
index 959d9d57e4d..7af5fa74e76 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/AbstractFileSystem.java
@@ -1191,6 +1191,36 @@ public abstract class AbstractFileSystem {
+ " doesn't support removeXAttr");
}
+ /**
+ * The specification of this method matches that of
+ * {@link FileContext#createSnapshot(Path, String)}.
+ */
+ public Path createSnapshot(final Path path, final String snapshotName)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support createSnapshot");
+ }
+
+ /**
+ * The specification of this method matches that of
+ * {@link FileContext#renameSnapshot(Path, String, String)}.
+ */
+ public void renameSnapshot(final Path path, final String snapshotOldName,
+ final String snapshotNewName) throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support renameSnapshot");
+ }
+
+ /**
+ * The specification of this method matches that of
+ * {@link FileContext#deleteSnapshot(Path, String)}.
+ */
+ public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
+ throws IOException {
+ throw new UnsupportedOperationException(getClass().getSimpleName()
+ + " doesn't support deleteSnapshot");
+ }
+
@Override //Object
public int hashCode() {
return myUri.hashCode();
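
For illustration only (not part of this patch): with these defaults, a caller going through the new FileContext wrappers below fails fast on schemes that never override the methods. A minimal sketch, assuming the Configuration's default file system (e.g. the local file system) has no snapshot support:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class SnapshotSupportProbe {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        try {
          // Routed to AbstractFileSystem#createSnapshot; the default
          // implementation above simply rejects the call.
          fc.createSnapshot(new Path("/tmp/probe-dir"), "probe");
        } catch (UnsupportedOperationException e) {
          System.out.println("No snapshot support: " + e.getMessage());
        }
      }
    }
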
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
index ea3f8964b15..16cb59124ef 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java
@@ -2564,4 +2564,105 @@ public class FileContext {
}
}.resolve(this, absF);
}
+
+ /**
+ * Create a snapshot with a default name.
+ *
+ * @param path The directory where snapshots will be taken.
+ * @return the snapshot path.
+ *
+ * @throws IOException If an I/O error occurred
+ *
+ * Exceptions applicable to file systems accessed over RPC:
+ * @throws RpcClientException If an exception occurred in the RPC client
+ * @throws RpcServerException If an exception occurred in the RPC server
+ * @throws UnexpectedServerException If server implementation throws
+ * undeclared exception to RPC server
+ */
+ public final Path createSnapshot(Path path) throws IOException {
+ return createSnapshot(path, null);
+ }
+
+ /**
+ * Create a snapshot.
+ *
+ * @param path The directory where snapshots will be taken.
+ * @param snapshotName The name of the snapshot
+ * @return the snapshot path.
+ *
+ * @throws IOException If an I/O error occurred
+ *
+ * Exceptions applicable to file systems accessed over RPC:
+ * @throws RpcClientException If an exception occurred in the RPC client
+ * @throws RpcServerException If an exception occurred in the RPC server
+ * @throws UnexpectedServerException If server implementation throws
+ * undeclared exception to RPC server
+ */
+ public Path createSnapshot(final Path path, final String snapshotName)
+ throws IOException {
+ final Path absF = fixRelativePart(path);
+ return new FSLinkResolver<Path>() {
+
+ @Override
+ public Path next(final AbstractFileSystem fs, final Path p)
+ throws IOException {
+ return fs.createSnapshot(p, snapshotName);
+ }
+ }.resolve(this, absF);
+ }
+
+ /**
+ * Rename a snapshot.
+ *
+ * @param path The directory path where the snapshot was taken
+ * @param snapshotOldName Old name of the snapshot
+ * @param snapshotNewName New name of the snapshot
+ *
+ * @throws IOException If an I/O error occurred
+ *
+ * Exceptions applicable to file systems accessed over RPC:
+ * @throws RpcClientException If an exception occurred in the RPC client
+ * @throws RpcServerException If an exception occurred in the RPC server
+ * @throws UnexpectedServerException If server implementation throws
+ * undeclared exception to RPC server
+ */
+ public void renameSnapshot(final Path path, final String snapshotOldName,
+ final String snapshotNewName) throws IOException {
+ final Path absF = fixRelativePart(path);
+ new FSLinkResolver<Void>() {
+ @Override
+ public Void next(final AbstractFileSystem fs, final Path p)
+ throws IOException {
+ fs.renameSnapshot(p, snapshotOldName, snapshotNewName);
+ return null;
+ }
+ }.resolve(this, absF);
+ }
+
+ /**
+ * Delete a snapshot of a directory.
+ *
+ * @param path The directory that the to-be-deleted snapshot belongs to
+ * @param snapshotName The name of the snapshot
+ *
+ * @throws IOException If an I/O error occurred
+ *
+ * Exceptions applicable to file systems accessed over RPC:
+ * @throws RpcClientException If an exception occurred in the RPC client
+ * @throws RpcServerException If an exception occurred in the RPC server
+ * @throws UnexpectedServerException If server implementation throws
+ * undeclared exception to RPC server
+ */
+ public void deleteSnapshot(final Path path, final String snapshotName)
+ throws IOException {
+ final Path absF = fixRelativePart(path);
+ new FSLinkResolver<Void>() {
+ @Override
+ public Void next(final AbstractFileSystem fs, final Path p)
+ throws IOException {
+ fs.deleteSnapshot(p, snapshotName);
+ return null;
+ }
+ }.resolve(this, absF);
+ }
}
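
For illustration only (not part of this patch): a usage sketch of the three new FileContext calls against HDFS, assuming fs.defaultFS points at a running cluster and snapshots were allowed on /data (e.g. "hdfs dfsadmin -allowSnapshot /data"):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class FileContextSnapshotExample {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(new Configuration());
        Path dir = new Path("/data");

        // Take a snapshot; on HDFS the returned path is <dir>/.snapshot/<name>.
        Path s1 = fc.createSnapshot(dir, "s1");
        System.out.println("created " + s1);

        // Rename the snapshot, then remove it.
        fc.renameSnapshot(dir, "s1", "s2");
        fc.deleteSnapshot(dir, "s2");
      }
    }
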
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
index 4f28a9a6b2d..f6a6f233f43 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FilterFs.java
@@ -371,4 +371,22 @@ public abstract class FilterFs extends AbstractFileSystem {
public void removeXAttr(Path path, String name) throws IOException {
myFs.removeXAttr(path, name);
}
+
+ @Override
+ public Path createSnapshot(final Path path, final String snapshotName)
+ throws IOException {
+ return myFs.createSnapshot(path, snapshotName);
+ }
+
+ @Override
+ public void renameSnapshot(final Path path, final String snapshotOldName,
+ final String snapshotNewName) throws IOException {
+ myFs.renameSnapshot(path, snapshotOldName, snapshotNewName);
+ }
+
+ @Override
+ public void deleteSnapshot(final Path path, final String snapshotName)
+ throws IOException {
+ myFs.deleteSnapshot(path, snapshotName);
+ }
}
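
For illustration only (not part of this patch): with these overrides, FilterFs subclasses such as ChecksumFs and LocalFs pick up snapshot forwarding without further changes. A hypothetical subclass (class name made up) showing that no extra code is needed:

    import java.io.IOException;
    import java.net.URISyntaxException;
    import org.apache.hadoop.fs.AbstractFileSystem;
    import org.apache.hadoop.fs.FilterFs;

    // A do-nothing wrapper: createSnapshot/renameSnapshot/deleteSnapshot
    // fall through to the wrapped file system via the FilterFs overrides above.
    class PassThroughFs extends FilterFs {
      PassThroughFs(AbstractFileSystem fs) throws IOException, URISyntaxException {
        super(fs);
      }
    }
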
diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e22f961fd6e..efce95be48a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -525,6 +525,8 @@ Release 2.8.0 - UNRELEASED
HDFS-7559. Create unit test to automatically compare HDFS related classes
and hdfs-default.xml. (Ray Chiang via asuresh)
+ HDFS-5640. Add snapshot methods to FileContext. (Rakesh R via cnauroth)
+
OPTIMIZATIONS
HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
index aaaff251579..b7768493cbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/fs/Hdfs.java
@@ -495,4 +495,22 @@ public class Hdfs extends AbstractFileSystem {
throws InvalidToken, IOException {
dfs.cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
}
+
+ @Override
+ public Path createSnapshot(final Path path, final String snapshotName)
+ throws IOException {
+ return new Path(dfs.createSnapshot(getUriPath(path), snapshotName));
+ }
+
+ @Override
+ public void renameSnapshot(final Path path, final String snapshotOldName,
+ final String snapshotNewName) throws IOException {
+ dfs.renameSnapshot(getUriPath(path), snapshotOldName, snapshotNewName);
+ }
+
+ @Override
+ public void deleteSnapshot(final Path snapshotDir, final String snapshotName)
+ throws IOException {
+ dfs.deleteSnapshot(getUriPath(snapshotDir), snapshotName);
+ }
}
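
For illustration only (not part of this patch): Hdfs forwards each call to DFSClient, so a FileContext bound explicitly to an hdfs URI takes the same code path as the test below. A sketch with a placeholder NameNode address:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileContext;
    import org.apache.hadoop.fs.Path;

    public class HdfsSnapshotPathExample {
      public static void main(String[] args) throws Exception {
        FileContext fc = FileContext.getFileContext(
            URI.create("hdfs://namenode.example.com:8020"), new Configuration());
        // DFSClient#createSnapshot returns the snapshot root as a String,
        // which Hdfs wraps back into a Path such as /data/.snapshot/daily.
        Path snap = fc.createSnapshot(new Path("/data"), "daily");
        System.out.println(snap);
        fc.deleteSnapshot(new Path("/data"), "daily");
      }
    }
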
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java
new file mode 100644
index 00000000000..dcabed7be38
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestFileContextSnapshot.java
@@ -0,0 +1,124 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.namenode.snapshot;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.SnapshotException;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestFileContextSnapshot {
+
+ private static final short REPLICATION = 3;
+ private static final int BLOCKSIZE = 1024;
+ private static final long SEED = 0;
+ private Configuration conf;
+ private MiniDFSCluster cluster;
+ private FileContext fileContext;
+ private DistributedFileSystem dfs;
+
+ private final String snapshotRoot = "/snapshot";
+ private final Path filePath = new Path(snapshotRoot, "file1");
+ private Path snapRootPath;
+
+ @Before
+ public void setUp() throws Exception {
+ conf = new Configuration();
+ conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCKSIZE);
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION)
+ .build();
+ cluster.waitActive();
+
+ fileContext = FileContext.getFileContext(conf);
+ dfs = (DistributedFileSystem) cluster.getFileSystem();
+ snapRootPath = new Path(snapshotRoot);
+ dfs.mkdirs(snapRootPath);
+ }
+
+ @After
+ public void tearDown() throws Exception {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
+
+ @Test(timeout = 60000)
+ public void testCreateAndDeleteSnapshot() throws Exception {
+ DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
+ // disallow snapshot on dir
+ dfs.disallowSnapshot(snapRootPath);
+ try {
+ fileContext.createSnapshot(snapRootPath, "s1");
+ } catch (SnapshotException e) {
+ GenericTestUtils.assertExceptionContains(
+ "Directory is not a snapshottable directory: " + snapRootPath, e);
+ }
+
+ // allow snapshot on dir
+ dfs.allowSnapshot(snapRootPath);
+ Path ssPath = fileContext.createSnapshot(snapRootPath, "s1");
+ assertTrue("Failed to create snapshot", dfs.exists(ssPath));
+ fileContext.deleteSnapshot(snapRootPath, "s1");
+ assertFalse("Failed to delete snapshot", dfs.exists(ssPath));
+ }
+
+ /**
+ * Test FileStatus of snapshot file before/after rename
+ */
+ @Test(timeout = 60000)
+ public void testRenameSnapshot() throws Exception {
+ DFSTestUtil.createFile(dfs, filePath, BLOCKSIZE, REPLICATION, SEED);
+ dfs.allowSnapshot(snapRootPath);
+ // Create a snapshot of the snapshottable directory
+ Path snapPath1 = fileContext.createSnapshot(snapRootPath, "s1");
+ Path ssPath = new Path(snapPath1, filePath.getName());
+ assertTrue("Failed to create snapshot", dfs.exists(ssPath));
+ FileStatus statusBeforeRename = dfs.getFileStatus(ssPath);
+
+ // Rename the snapshot
+ fileContext.renameSnapshot(snapRootPath, "s1", "s2");
+ // /.snapshot/s1/file1 should no longer exist
+ assertFalse("Old snapshot still exists after rename!", dfs.exists(ssPath));
+ Path snapshotRoot = SnapshotTestHelper.getSnapshotRoot(snapRootPath, "s2");
+ ssPath = new Path(snapshotRoot, filePath.getName());
+
+ // Instead, /.snapshot/s2/file1 should exist
+ assertTrue("Snapshot doesn't exists!", dfs.exists(ssPath));
+ FileStatus statusAfterRename = dfs.getFileStatus(ssPath);
+
+ // FileStatus of the snapshot should not change except the path
+ assertFalse("Filestatus of the snapshot matches",
+ statusBeforeRename.equals(statusAfterRename));
+ statusBeforeRename.setPath(statusAfterRename.getPath());
+ assertEquals("FileStatus of the snapshot mismatches!",
+ statusBeforeRename.toString(), statusAfterRename.toString());
+ }
+}
\ No newline at end of file