HDFS-9057. allow/disallow snapshots via webhdfs (Contributed by Brahma Reddy Battula)
commit 6d2332ae37
parent 3ae82863f0
@@ -1090,6 +1090,12 @@ public class WebHdfsFileSystem extends FileSystem
     new FsPathRunner(op, p, new AclPermissionParam(aclSpec)).run();
   }
 
+  public void allowSnapshot(final Path p) throws IOException {
+    statistics.incrementWriteOps(1);
+    final HttpOpParam.Op op = PutOpParam.Op.ALLOWSNAPSHOT;
+    new FsPathRunner(op, p).run();
+  }
+
   @Override
   public Path createSnapshot(final Path path, final String snapshotName)
       throws IOException {
@@ -1104,6 +1110,12 @@ public class WebHdfsFileSystem extends FileSystem
     }.run();
   }
 
+  public void disallowSnapshot(final Path p) throws IOException {
+    statistics.incrementWriteOps(1);
+    final HttpOpParam.Op op = PutOpParam.Op.DISALLOWSNAPSHOT;
+    new FsPathRunner(op, p).run();
+  }
+
   @Override
   public void deleteSnapshot(final Path path, final String snapshotName)
       throws IOException {
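
For context, a minimal client-side sketch of how the two new methods might be exercised; the NameNode address "nn-host:50070" and the class name AllowSnapshotExample are illustrative placeholders, not part of this commit:

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;

public class AllowSnapshotExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // allowSnapshot/disallowSnapshot are declared on WebHdfsFileSystem itself
    // (note the missing @Override in the diff above), so the cast is needed.
    WebHdfsFileSystem webHdfs = (WebHdfsFileSystem) FileSystem.get(
        URI.create("webhdfs://nn-host:50070"), conf);

    Path dir = new Path("/bar");
    webHdfs.allowSnapshot(dir);         // mark /bar snapshottable
    webHdfs.createSnapshot(dir, "s1");  // take a snapshot named s1
    webHdfs.deleteSnapshot(dir, "s1");  // remove the snapshot
    webHdfs.disallowSnapshot(dir);      // make /bar non-snapshottable again
  }
}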
@@ -46,6 +46,8 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
     SETXATTR(false, HttpURLConnection.HTTP_OK),
     REMOVEXATTR(false, HttpURLConnection.HTTP_OK),
 
+    ALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
+    DISALLOWSNAPSHOT(false, HttpURLConnection.HTTP_OK),
     CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
     RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
 
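
These enum entries expose the operations on the WebHDFS REST surface. Assuming the standard WebHDFS URL pattern (not shown in this diff), the new ops would be invoked as plain PUT requests, where <NN_HOST>, <NN_HTTP_PORT>, and <PATH> are placeholders:

PUT http://<NN_HOST>:<NN_HTTP_PORT>/webhdfs/v1/<PATH>?op=ALLOWSNAPSHOT
PUT http://<NN_HOST>:<NN_HTTP_PORT>/webhdfs/v1/<PATH>?op=DISALLOWSNAPSHOT

The false flag means the op carries no request body and needs no datanode redirect, and HttpURLConnection.HTTP_OK (200) is the expected response code, matching the neighboring snapshot ops.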
@@ -50,6 +50,9 @@ Trunk (Unreleased)
 
     HDFS-6440. Support more than 2 NameNodes. (Jesse Yates via atm)
 
+    HDFS-9057. allow/disallow snapshots via webhdfs
+    (Brahma Reddy Battula via vinayakumarb)
+
   IMPROVEMENTS
 
     HDFS-4665. Move TestNetworkTopologyWithNodeGroup to common.
 
@@ -590,6 +590,10 @@ public class NamenodeWebHdfsMethods
       np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
+    case ALLOWSNAPSHOT: {
+      np.allowSnapshot(fullpath);
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    }
     case CREATESNAPSHOT: {
       String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue());
       final String js = JsonUtil.toJsonString(
@@ -601,6 +605,10 @@ public class NamenodeWebHdfsMethods
           snapshotName.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
+    case DISALLOWSNAPSHOT: {
+      np.disallowSnapshot(fullpath);
+      return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
+    }
     default:
       throw new UnsupportedOperationException(op + " is not supported");
     }
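
Note that both new cases return an empty 200 OK with an octet-stream content type, mirroring the REMOVEXATTR case above them, whereas CREATESNAPSHOT responds with a JSON body carrying the path of the snapshot it created.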
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.web;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotEquals;
+import static org.junit.Assert.assertNull;
 import static org.junit.Assert.fail;
 
 import java.io.EOFException;
@@ -50,6 +51,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
+import org.apache.hadoop.hdfs.protocol.SnapshottableDirectoryStatus;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
 import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
@@ -338,6 +340,60 @@ public class TestWebHDFS
     }
   }
 
+  /**
+   * Test allow and disallow snapshot through WebHdfs. Verifying webhdfs with
+   * Distributed filesystem methods.
+   */
+  @Test
+  public void testWebHdfsAllowandDisallowSnapshots() throws Exception {
+    MiniDFSCluster cluster = null;
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    try {
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+      cluster.waitActive();
+      final DistributedFileSystem dfs = cluster.getFileSystem();
+      final WebHdfsFileSystem webHdfs = WebHdfsTestUtil
+          .getWebHdfsFileSystem(conf, WebHdfsConstants.WEBHDFS_SCHEME);
+
+      final Path bar = new Path("/bar");
+      dfs.mkdirs(bar);
+
+      // allow snapshots on /bar using webhdfs
+      webHdfs.allowSnapshot(bar);
+      webHdfs.createSnapshot(bar, "s1");
+      final Path s1path = SnapshotTestHelper.getSnapshotRoot(bar, "s1");
+      Assert.assertTrue(webHdfs.exists(s1path));
+      SnapshottableDirectoryStatus[] snapshottableDirs =
+          dfs.getSnapshottableDirListing();
+      assertEquals(1, snapshottableDirs.length);
+      assertEquals(bar, snapshottableDirs[0].getFullPath());
+      dfs.deleteSnapshot(bar, "s1");
+      dfs.disallowSnapshot(bar);
+      snapshottableDirs = dfs.getSnapshottableDirListing();
+      assertNull(snapshottableDirs);
+
+      // disallow snapshots on /bar using webhdfs
+      dfs.allowSnapshot(bar);
+      snapshottableDirs = dfs.getSnapshottableDirListing();
+      assertEquals(1, snapshottableDirs.length);
+      assertEquals(bar, snapshottableDirs[0].getFullPath());
+      webHdfs.disallowSnapshot(bar);
+      snapshottableDirs = dfs.getSnapshottableDirListing();
+      assertNull(snapshottableDirs);
+      try {
+        webHdfs.createSnapshot(bar);
+        fail("Cannot create snapshot on a non-snapshottable directory");
+      } catch (Exception e) {
+        GenericTestUtils.assertExceptionContains(
+            "Directory is not a snapshottable directory", e);
+      }
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+
   /**
    * Test snapshot creation through WebHdfs
    */