HDFS-6432. Merge r1596334 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1596335 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao 2014-05-20 17:35:14 +00:00
parent 11bb19d1ff
commit ffc50dfe01
8 changed files with 281 additions and 6 deletions

CHANGES.txt

@@ -118,6 +118,8 @@ Release 2.5.0 - UNRELEASED
HDFS-6345. DFS.listCacheDirectives() should allow filtering based on
cache directive ID. (wang)
HDFS-6432. Add snapshot related APIs to webhdfs. (jing9)
OPTIMIZATIONS
HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

NamenodeWebHdfsMethods.java

@@ -88,6 +88,7 @@ import org.apache.hadoop.hdfs.web.resources.LengthParam;
import org.apache.hadoop.hdfs.web.resources.ModificationTimeParam;
import org.apache.hadoop.hdfs.web.resources.NamenodeAddressParam;
import org.apache.hadoop.hdfs.web.resources.OffsetParam;
import org.apache.hadoop.hdfs.web.resources.OldSnapshotNameParam;
import org.apache.hadoop.hdfs.web.resources.OverwriteParam;
import org.apache.hadoop.hdfs.web.resources.OwnerParam;
import org.apache.hadoop.hdfs.web.resources.Param;
@@ -98,6 +99,7 @@ import org.apache.hadoop.hdfs.web.resources.RecursiveParam;
import org.apache.hadoop.hdfs.web.resources.RenameOptionSetParam;
import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;
import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -341,12 +343,16 @@ public class NamenodeWebHdfsMethods {
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument,
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission,
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
final OldSnapshotNameParam oldSnapshotName
) throws IOException, InterruptedException {
return put(ugi, delegation, username, doAsUser, ROOT, op, destination,
owner, group, permission, overwrite, bufferSize, replication,
blockSize, modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, snapshotName, oldSnapshotName);
}
/** Handle HTTP PUT request. */
@@ -392,12 +398,17 @@ public class NamenodeWebHdfsMethods {
@QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
final TokenArgumentParam delegationTokenArgument,
@QueryParam(AclPermissionParam.NAME) @DefaultValue(AclPermissionParam.DEFAULT)
final AclPermissionParam aclPermission,
@QueryParam(SnapshotNameParam.NAME) @DefaultValue(SnapshotNameParam.DEFAULT)
final SnapshotNameParam snapshotName,
@QueryParam(OldSnapshotNameParam.NAME) @DefaultValue(OldSnapshotNameParam.DEFAULT)
final OldSnapshotNameParam oldSnapshotName
) throws IOException, InterruptedException {
init(ugi, delegation, username, doAsUser, path, op, destination, owner,
group, permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, delegationTokenArgument,
aclPermission, snapshotName, oldSnapshotName);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@@ -407,7 +418,8 @@ public class NamenodeWebHdfsMethods {
path.getAbsolutePath(), op, destination, owner, group,
permission, overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions, createParent,
delegationTokenArgument, aclPermission, snapshotName,
oldSnapshotName);
} finally {
reset();
}
@@ -435,7 +447,9 @@ public class NamenodeWebHdfsMethods {
final RenameOptionSetParam renameOptions,
final CreateParentParam createParent,
final TokenArgumentParam delegationTokenArgument,
final AclPermissionParam aclPermission,
final SnapshotNameParam snapshotName,
final OldSnapshotNameParam oldSnapshotName
) throws IOException, URISyntaxException {
final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
@@ -535,6 +549,21 @@ public class NamenodeWebHdfsMethods {
np.setAcl(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case CREATESNAPSHOT: {
String snapshotPath = np.createSnapshot(fullpath, snapshotName.getValue());
final String js = JsonUtil.toJsonString(
org.apache.hadoop.fs.Path.class.getSimpleName(), snapshotPath);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case DELETESNAPSHOT: {
np.deleteSnapshot(fullpath, snapshotName.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case RENAMESNAPSHOT: {
np.renameSnapshot(fullpath, oldSnapshotName.getValue(),
snapshotName.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
default:
throw new UnsupportedOperationException(op + " is not supported");
}
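Of the three new cases, only CREATESNAPSHOT returns a body: JsonUtil.toJsonString keys the new snapshot path under Path.class.getSimpleName(), so a successful call yields JSON of the form {"Path":"/foo/.snapshot/s1"} (path shown here only for illustration). DELETESNAPSHOT and RENAMESNAPSHOT reply with an empty 200 OK, matching the ACL operations above them.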

WebHdfsFileSystem.java

@@ -874,6 +874,38 @@ public class WebHdfsFileSystem extends FileSystem
new FsPathRunner(op, p, new AclPermissionParam(aclSpec)).run();
}
@Override
public Path createSnapshot(final Path path, final String snapshotName)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.CREATESNAPSHOT;
Path spath = new FsPathResponseRunner<Path>(op, path,
new SnapshotNameParam(snapshotName)) {
@Override
Path decodeResponse(Map<?,?> json) {
return new Path((String) json.get(Path.class.getSimpleName()));
}
}.run();
return spath;
}
@Override
public void deleteSnapshot(final Path path, final String snapshotName)
throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.DELETESNAPSHOT;
new FsPathRunner(op, path, new SnapshotNameParam(snapshotName)).run();
}
@Override
public void renameSnapshot(final Path path, final String snapshotOldName,
final String snapshotNewName) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.RENAMESNAPSHOT;
new FsPathRunner(op, path, new OldSnapshotNameParam(snapshotOldName),
new SnapshotNameParam(snapshotNewName)).run();
}
@Override
public boolean setReplication(final Path p, final short replication
) throws IOException {
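Taken together, these three methods expose the full snapshot lifecycle to any FileSystem client over HTTP. A minimal usage sketch, assuming a NameNode with WebHDFS enabled at nn:50070 and a directory already made snapshottable (host, port, and path here are illustrative, not from the patch):

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class WebHdfsSnapshotExample {
  public static void main(String[] args) throws Exception {
    // Assumes /foo was made snapshottable beforehand,
    // e.g. via 'hdfs dfsadmin -allowSnapshot /foo'.
    FileSystem fs = FileSystem.get(
        URI.create("webhdfs://nn:50070"), new Configuration());
    Path dir = new Path("/foo");
    // Issues PUT /webhdfs/v1/foo?op=CREATESNAPSHOT&snapshotname=s1 (auth params omitted)
    Path snap = fs.createSnapshot(dir, "s1");
    System.out.println("created " + snap);  // e.g. /foo/.snapshot/s1
    // Issues PUT /webhdfs/v1/foo?op=RENAMESNAPSHOT&oldsnapshotname=s1&snapshotname=s2
    fs.renameSnapshot(dir, "s1", "s2");
    // Issues PUT /webhdfs/v1/foo?op=DELETESNAPSHOT&snapshotname=s2
    fs.deleteSnapshot(dir, "s2");
  }
}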

OldSnapshotNameParam.java (new file)

@@ -0,0 +1,40 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
/**
* The old snapshot name parameter for the renameSnapshot operation.
*/
public class OldSnapshotNameParam extends StringParam {
/** Parameter name. */
public static final String NAME = "oldsnapshotname";
/** Default parameter value. */
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
public OldSnapshotNameParam(final String str) {
super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
}
@Override
public String getName() {
return NAME;
}
}

PutOpParam.java

@@ -42,6 +42,10 @@ public class PutOpParam extends HttpOpParam<PutOpParam.Op> {
REMOVEDEFAULTACL(false, HttpURLConnection.HTTP_OK),
REMOVEACL(false, HttpURLConnection.HTTP_OK),
SETACL(false, HttpURLConnection.HTTP_OK),
CREATESNAPSHOT(false, HttpURLConnection.HTTP_OK),
DELETESNAPSHOT(false, HttpURLConnection.HTTP_OK),
RENAMESNAPSHOT(false, HttpURLConnection.HTTP_OK),
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
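Like the ACL operations just above, the snapshot ops pass false for the op's first flag, which (as with those ops) means the client sends no request body and the call is handled by the NameNode itself rather than redirected to a datanode. On the wire each is a plain PUT such as PUT /webhdfs/v1/&lt;path&gt;?op=RENAMESNAPSHOT&oldsnapshotname=s1&snapshotname=s2, with the snapshot names here purely illustrative.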

SnapshotNameParam.java (new file)

@@ -0,0 +1,41 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.web.resources;
/**
* The snapshot name parameter for the createSnapshot and deleteSnapshot
* operations. Also used to indicate the new snapshot name for the
* renameSnapshot operation.
*/
public class SnapshotNameParam extends StringParam {
/** Parameter name. */
public static final String NAME = "snapshotname";
/** Default parameter value. */
public static final String DEFAULT = "";
private static final Domain DOMAIN = new Domain(NAME, null);
public SnapshotNameParam(final String str) {
super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
}
@Override
public String getName() {
return NAME;
}
}
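Note that both parameter classes map the empty DEFAULT (and null) to a null value, so a request that omits the parameter still passes validation. A quick standalone illustration of that mapping (hypothetical usage outside the servlet, shown only to make it concrete):

import org.apache.hadoop.hdfs.web.resources.SnapshotNameParam;

public class SnapshotParamDemo {
  public static void main(String[] args) {
    SnapshotNameParam set = new SnapshotNameParam("s1");
    // Renders as the query fragment snapshotname=s1
    System.out.println(set.getName() + "=" + set.getValue());
    SnapshotNameParam unset = new SnapshotNameParam("");
    System.out.println(unset.getValue());  // null: the empty DEFAULT maps to null
  }
}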

TestWebHDFS.java

@@ -18,6 +18,8 @@
package org.apache.hadoop.hdfs.web;
import static org.junit.Assert.fail;
import java.io.IOException;
import java.net.URISyntaxException;
import java.security.PrivilegedExceptionAction;
@@ -34,8 +36,10 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.snapshot.SnapshotTestHelper;
import org.apache.hadoop.hdfs.server.namenode.web.resources.NamenodeWebHdfsMethods;
import org.apache.hadoop.hdfs.TestDFSClientRetries;
import org.apache.hadoop.security.UserGroupInformation;
@@ -326,4 +330,119 @@ public class TestWebHDFS {
Assert.assertTrue(conf.getBoolean(DFSConfigKeys.DFS_WEBHDFS_ENABLED_KEY,
false));
}
/**
* Test snapshot creation through WebHdfs
*/
@Test
public void testWebHdfsCreateSnapshot() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsFileSystem.SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
try {
webHdfs.createSnapshot(foo);
fail("Cannot create snapshot on a non-snapshottable directory");
} catch (Exception e) {
GenericTestUtils.assertExceptionContains(
"Directory is not a snapshottable directory", e);
}
// allow snapshots on /foo
dfs.allowSnapshot(foo);
// create snapshots on foo using WebHdfs
webHdfs.createSnapshot(foo, "s1");
// create snapshot without specifying name
final Path spath = webHdfs.createSnapshot(foo, null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test snapshot deletion through WebHdfs
*/
@Test
public void testWebHdfsDeleteSnapshot() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsFileSystem.SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo, "s1");
final Path spath = webHdfs.createSnapshot(foo, null);
Assert.assertTrue(webHdfs.exists(spath));
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
// delete the two snapshots
webHdfs.deleteSnapshot(foo, "s1");
Assert.assertFalse(webHdfs.exists(s1path));
webHdfs.deleteSnapshot(foo, spath.getName());
Assert.assertFalse(webHdfs.exists(spath));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
/**
* Test snapshot rename through WebHdfs
*/
@Test
public void testWebHdfsRenameSnapshot() throws Exception {
MiniDFSCluster cluster = null;
final Configuration conf = WebHdfsTestUtil.createConf();
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
cluster.waitActive();
final DistributedFileSystem dfs = cluster.getFileSystem();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsFileSystem.SCHEME);
final Path foo = new Path("/foo");
dfs.mkdirs(foo);
dfs.allowSnapshot(foo);
webHdfs.createSnapshot(foo, "s1");
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
// rename s1 to s2
webHdfs.renameSnapshot(foo, "s1", "s2");
Assert.assertFalse(webHdfs.exists(s1path));
final Path s2path = SnapshotTestHelper.getSnapshotRoot(foo, "s2");
Assert.assertTrue(webHdfs.exists(s2path));
webHdfs.deleteSnapshot(foo, "s2");
Assert.assertFalse(webHdfs.exists(s2path));
} finally {
if (cluster != null) {
cluster.shutdown();
}
}
}
}

TestParam.java

@@ -357,4 +357,12 @@ public class TestParam {
Assert.assertEquals(p1.getValue(), EnumSet.of(
Options.Rename.OVERWRITE, Options.Rename.NONE));
}
@Test
public void testSnapshotNameParam() {
final OldSnapshotNameParam s1 = new OldSnapshotNameParam("s1");
final SnapshotNameParam s2 = new SnapshotNameParam("s2");
Assert.assertEquals("s1", s1.getValue());
Assert.assertEquals("s2", s2.getValue());
}
}