HDFS-9337. Validate required params for WebHDFS requests (Contributed by Jagadesh Kiran N)

This commit is contained in:
Vinayakumar B 2016-11-10 16:51:33 +05:30
parent 86ac1ad9fd
commit ca68f9cb5b
4 changed files with 51 additions and 7 deletions

View File

@ -424,6 +424,18 @@ public class NamenodeWebHdfsMethods {
excludeDatanodes, createFlagParam, noredirect);
}
/**
 * Reject the request when any mandatory parameter is absent or blank.
 *
 * @param op the WebHDFS operation being served, used only for the error text
 * @param params the parameters this operation cannot run without
 * @throws IllegalArgumentException if any param has a null value, a null
 *         string form, or an empty string form
 */
@SuppressWarnings("rawtypes")
private void validateOpParams(HttpOpParam<?> op, Param... params) {
  for (Param required : params) {
    // Keep the original short-circuit order: only consult the string form
    // once the value itself is known to be non-null.
    final boolean missing = required.getValue() == null
        || required.getValueString() == null
        || required.getValueString().isEmpty();
    if (missing) {
      throw new IllegalArgumentException("Required param " + required.getName()
          + " for op: " + op.getValueString() + " is null or empty");
    }
  }
}
/** Handle HTTP PUT request. */
@PUT
@Path("{" + UriFsPathParam.NAME + ":.*}")
@ -576,6 +588,7 @@ public class NamenodeWebHdfsMethods {
}
case CREATESYMLINK:
{
validateOpParams(op, destination);
np.createSymlink(destination.getValue(), fullpath,
PermissionParam.getDefaultSymLinkFsPermission(),
createParent.getValue());
@ -583,6 +596,7 @@ public class NamenodeWebHdfsMethods {
}
case RENAME:
{
validateOpParams(op, destination);
final EnumSet<Options.Rename> s = renameOptions.getValue();
if (s.isEmpty()) {
final boolean b = np.rename(fullpath, destination.getValue());
@ -621,6 +635,7 @@ public class NamenodeWebHdfsMethods {
}
case RENEWDELEGATIONTOKEN:
{
validateOpParams(op, delegationTokenArgument);
final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(delegationTokenArgument.getValue());
final long expiryTime = np.renewDelegationToken(token);
@ -629,16 +644,19 @@ public class NamenodeWebHdfsMethods {
}
case CANCELDELEGATIONTOKEN:
{
validateOpParams(op, delegationTokenArgument);
final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
token.decodeFromUrlString(delegationTokenArgument.getValue());
np.cancelDelegationToken(token);
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case MODIFYACLENTRIES: {
validateOpParams(op, aclPermission);
np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEACLENTRIES: {
validateOpParams(op, aclPermission);
np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
@ -651,10 +669,12 @@ public class NamenodeWebHdfsMethods {
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETACL: {
validateOpParams(op, aclPermission);
np.setAcl(fullpath, aclPermission.getAclPermission(true));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case SETXATTR: {
validateOpParams(op, xattrName, xattrSetFlag);
np.setXAttr(
fullpath,
XAttrHelper.buildXAttr(xattrName.getXAttrName(),
@ -662,6 +682,7 @@ public class NamenodeWebHdfsMethods {
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
case REMOVEXATTR: {
validateOpParams(op, xattrName);
np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}
@ -676,6 +697,7 @@ public class NamenodeWebHdfsMethods {
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case RENAMESNAPSHOT: {
validateOpParams(op, oldSnapshotName, snapshotName);
np.renameSnapshot(fullpath, oldSnapshotName.getValue(),
snapshotName.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
@ -794,15 +816,13 @@ public class NamenodeWebHdfsMethods {
}
case CONCAT:
{
validateOpParams(op, concatSrcs);
np.concat(fullpath, concatSrcs.getAbsolutePaths());
return Response.ok().build();
}
case TRUNCATE:
{
if (newLength.getValue() == null) {
throw new IllegalArgumentException(
"newLength parameter is Missing");
}
validateOpParams(op, newLength);
// We treat each rest request as a separate client.
final boolean b = np.truncate(fullpath, newLength.getValue(),
"DFSClient_" + DFSUtil.getSecureRandom().nextLong());
@ -1033,6 +1053,7 @@ public class NamenodeWebHdfsMethods {
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETXATTRS: {
validateOpParams(op, xattrEncoding);
List<String> names = null;
if (xattrNames != null) {
names = Lists.newArrayListWithCapacity(xattrNames.size());
@ -1054,6 +1075,7 @@ public class NamenodeWebHdfsMethods {
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case CHECKACCESS: {
validateOpParams(op, fsAction);
np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
return Response.ok().build();
}
@ -1222,6 +1244,7 @@ public class NamenodeWebHdfsMethods {
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case DELETESNAPSHOT: {
validateOpParams(op, snapshotName);
np.deleteSnapshot(fullpath, snapshotName.getValue());
return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
}

View File

@ -1203,7 +1203,8 @@ Delegation Token Operations
* Submit a HTTP GET request.
curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=<USER>&service=<SERVICE>&kind=<KIND>"
curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN
[&renewer=<USER>][&service=<SERVICE>][&kind=<KIND>]"
The client receives a response with a [`Token` JSON object](#Token_JSON_Schema):

View File

@ -287,7 +287,8 @@ public class FSXAttrBaseTest {
} catch (NullPointerException e) {
GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
} catch (RemoteException e) {
GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
GenericTestUtils.assertExceptionContains("Required param xattr.name for "
+ "op: SETXATTR is null or empty", e);
}
// Set xattr with empty name: "user."

View File

@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.ipc.RetriableException;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
@ -527,6 +528,15 @@ public class TestWebHDFS {
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
// delete operation snapshot name as null
try {
webHdfs.deleteSnapshot(foo, null);
fail("Expected IllegalArgumentException");
} catch (RemoteException e) {
Assert.assertEquals("Required param snapshotname for "
+ "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
}
// delete the two snapshots
webHdfs.deleteSnapshot(foo, "s1");
assertFalse(webHdfs.exists(s1path));
@ -585,6 +595,15 @@ public class TestWebHDFS {
final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
Assert.assertTrue(webHdfs.exists(s1path));
// rename s1 to s2 with oldsnapshotName as null
try {
webHdfs.renameSnapshot(foo, null, "s2");
fail("Expected IllegalArgumentException");
} catch (RemoteException e) {
Assert.assertEquals("Required param oldsnapshotname for "
+ "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
}
// rename s1 to s2
webHdfs.renameSnapshot(foo, "s1", "s2");
assertFalse(webHdfs.exists(s1path));
@ -643,7 +662,7 @@ public class TestWebHDFS {
try {
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
WebHdfsConstants.WEBHDFS_SCHEME);
WebHdfsConstants.WEBHDFS_SCHEME);
Assert.assertNull(webHdfs.getDelegationToken(null));
} finally {
if (cluster != null) {