HDFS-9337. Validate required params for WebHDFS requests (Contributed by Jagadesh Kiran N)
parent 86ac1ad9fd
commit ca68f9cb5b
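This change funnels every WebHDFS operation that has mandatory parameters through a single `validateOpParams` helper on the NameNode, so a request that omits one now fails fast with a uniform "Required param <name> for op: <OP> is null or empty" message instead of an op-specific NullPointerException or ad-hoc check. As a rough client-side illustration (not part of the patch), the sketch below probes a RENAMESNAPSHOT request that omits `oldsnapshotname`; the host, port, path, and user are placeholders, and the 4xx status plus JSON error body are assumptions based on WebHDFS's usual RemoteException mapping rather than anything stated in this commit.

```java
import java.io.BufferedReader;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class MissingParamProbe {
  public static void main(String[] args) throws Exception {
    // Placeholder NameNode address, path, and user; adjust for a real cluster.
    URL url = new URL("http://localhost:9870/webhdfs/v1/tmp/dir"
        + "?op=RENAMESNAPSHOT&snapshotname=s2&user.name=hdfs"); // oldsnapshotname omitted
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("PUT");

    // Assumed outcome after this patch: a 4xx status whose RemoteException body
    // reads like "Required param oldsnapshotname for op: RENAMESNAPSHOT is null or empty".
    System.out.println("status: " + conn.getResponseCode());
    InputStream err = conn.getErrorStream();
    if (err != null) {
      try (BufferedReader r = new BufferedReader(new InputStreamReader(err))) {
        String line;
        while ((line = r.readLine()) != null) {
          System.out.println(line);
        }
      }
    }
  }
}
```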
@@ -424,6 +424,18 @@ public class NamenodeWebHdfsMethods {
         excludeDatanodes, createFlagParam, noredirect);
   }
 
+  /** Validate all required params. */
+  @SuppressWarnings("rawtypes")
+  private void validateOpParams(HttpOpParam<?> op, Param... params) {
+    for (Param param : params) {
+      if (param.getValue() == null || param.getValueString() == null || param
+          .getValueString().isEmpty()) {
+        throw new IllegalArgumentException("Required param " + param.getName()
+            + " for op: " + op.getValueString() + " is null or empty");
+      }
+    }
+  }
+
   /** Handle HTTP PUT request. */
   @PUT
   @Path("{" + UriFsPathParam.NAME + ":.*}")

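As a quick way to see what the helper above accepts and rejects, here is a minimal standalone sketch. `SimpleParam` is a made-up stand-in for the Hadoop-internal `Param`/`HttpOpParam` types, and unlike the real helper it only looks at the string form (the real code also rejects a null typed `getValue()`).

```java
/** Hypothetical stand-in for org.apache.hadoop.hdfs.web.resources.Param. */
final class SimpleParam {
  private final String name;
  private final String valueString;

  SimpleParam(String name, String valueString) {
    this.name = name;
    this.valueString = valueString;
  }

  String getName() { return name; }
  String getValueString() { return valueString; }
}

public class ValidateOpParamsSketch {
  /** Mirrors the null/empty check in validateOpParams, with the op passed as a plain name. */
  static void validate(String opName, SimpleParam... params) {
    for (SimpleParam param : params) {
      if (param.getValueString() == null || param.getValueString().isEmpty()) {
        throw new IllegalArgumentException("Required param " + param.getName()
            + " for op: " + opName + " is null or empty");
      }
    }
  }

  public static void main(String[] args) {
    // A present, non-empty value passes silently.
    validate("RENAMESNAPSHOT", new SimpleParam("snapshotname", "s2"));
    // A missing or empty value is rejected with a message naming the param and the op.
    try {
      validate("RENAMESNAPSHOT", new SimpleParam("oldsnapshotname", ""));
    } catch (IllegalArgumentException e) {
      System.out.println(e.getMessage());
    }
  }
}
```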
@@ -576,6 +588,7 @@
     }
     case CREATESYMLINK:
     {
+      validateOpParams(op, destination);
       np.createSymlink(destination.getValue(), fullpath,
           PermissionParam.getDefaultSymLinkFsPermission(),
           createParent.getValue());

@@ -583,6 +596,7 @@
     }
     case RENAME:
     {
+      validateOpParams(op, destination);
       final EnumSet<Options.Rename> s = renameOptions.getValue();
       if (s.isEmpty()) {
         final boolean b = np.rename(fullpath, destination.getValue());

@@ -621,6 +635,7 @@
     }
     case RENEWDELEGATIONTOKEN:
     {
+      validateOpParams(op, delegationTokenArgument);
       final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
       final long expiryTime = np.renewDelegationToken(token);

@@ -629,16 +644,19 @@
     }
     case CANCELDELEGATIONTOKEN:
     {
+      validateOpParams(op, delegationTokenArgument);
       final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
       token.decodeFromUrlString(delegationTokenArgument.getValue());
       np.cancelDelegationToken(token);
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case MODIFYACLENTRIES: {
+      validateOpParams(op, aclPermission);
       np.modifyAclEntries(fullpath, aclPermission.getAclPermission(true));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case REMOVEACLENTRIES: {
+      validateOpParams(op, aclPermission);
       np.removeAclEntries(fullpath, aclPermission.getAclPermission(false));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }

@@ -651,10 +669,12 @@
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case SETACL: {
+      validateOpParams(op, aclPermission);
       np.setAcl(fullpath, aclPermission.getAclPermission(true));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case SETXATTR: {
+      validateOpParams(op, xattrName, xattrSetFlag);
       np.setXAttr(
           fullpath,
           XAttrHelper.buildXAttr(xattrName.getXAttrName(),

@@ -662,6 +682,7 @@
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }
     case REMOVEXATTR: {
+      validateOpParams(op, xattrName);
       np.removeXAttr(fullpath, XAttrHelper.buildXAttr(xattrName.getXAttrName()));
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }

@@ -676,6 +697,7 @@
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case RENAMESNAPSHOT: {
+      validateOpParams(op, oldSnapshotName, snapshotName);
       np.renameSnapshot(fullpath, oldSnapshotName.getValue(),
           snapshotName.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();

@@ -794,15 +816,13 @@
     }
     case CONCAT:
     {
+      validateOpParams(op, concatSrcs);
       np.concat(fullpath, concatSrcs.getAbsolutePaths());
       return Response.ok().build();
     }
     case TRUNCATE:
     {
-      if (newLength.getValue() == null) {
-        throw new IllegalArgumentException(
-            "newLength parameter is Missing");
-      }
+      validateOpParams(op, newLength);
       // We treat each rest request as a separate client.
       final boolean b = np.truncate(fullpath, newLength.getValue(),
           "DFSClient_" + DFSUtil.getSecureRandom().nextLong());

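Note that the TRUNCATE branch above is the one place where an existing ad-hoc check ("newLength parameter is Missing") is replaced rather than a new one added, so a missing length now produces the same standardized wording as every other op. For reference, a hedged sketch of a client call that does supply the length via the FileSystem API; the cluster address is a placeholder, and truncate over webhdfs assumes a Hadoop release that supports it (2.7+).

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateOverWebHdfs {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode HTTP address; the webhdfs:// scheme goes through the REST API.
    FileSystem fs = FileSystem.get(new Path("webhdfs://namenode:9870/").toUri(), conf);

    // This issues op=TRUNCATE with a newlength query parameter; omitting newlength at
    // the REST level is exactly what validateOpParams(op, newLength) now rejects.
    boolean completedInPlace = fs.truncate(new Path("/tmp/data.log"), 128L);
    System.out.println("truncate completed synchronously: " + completedInPlace);
  }
}
```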
@@ -1033,6 +1053,7 @@
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETXATTRS: {
+      validateOpParams(op, xattrEncoding);
       List<String> names = null;
       if (xattrNames != null) {
         names = Lists.newArrayListWithCapacity(xattrNames.size());

@@ -1054,6 +1075,7 @@
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case CHECKACCESS: {
+      validateOpParams(op, fsAction);
       np.checkAccess(fullpath, FsAction.getFsAction(fsAction.getValue()));
       return Response.ok().build();
     }

@@ -1222,6 +1244,7 @@
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case DELETESNAPSHOT: {
+      validateOpParams(op, snapshotName);
       np.deleteSnapshot(fullpath, snapshotName.getValue());
       return Response.ok().type(MediaType.APPLICATION_OCTET_STREAM).build();
     }

@@ -1203,7 +1203,8 @@ Delegation Token Operations
 
 * Submit a HTTP GET request.
 
-        curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN&renewer=<USER>&service=<SERVICE>&kind=<KIND>"
+        curl -i "http://<HOST>:<PORT>/webhdfs/v1/?op=GETDELEGATIONTOKEN
+                    [&renewer=<USER>][&service=<SERVICE>][&kind=<KIND>]"
 
 The client receives a response with a [`Token` JSON object](#Token_JSON_Schema):

@@ -287,7 +287,8 @@ public class FSXAttrBaseTest {
     } catch (NullPointerException e) {
       GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
     } catch (RemoteException e) {
-      GenericTestUtils.assertExceptionContains("XAttr name cannot be null", e);
+      GenericTestUtils.assertExceptionContains("Required param xattr.name for "
+          + "op: SETXATTR is null or empty", e);
     }
 
     // Set xattr with empty name: "user."

@@ -75,6 +75,7 @@ import org.apache.hadoop.hdfs.web.resources.Param;
 import org.apache.hadoop.io.retry.RetryPolicy;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction.RetryDecision;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.RetriableException;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;

@@ -527,6 +528,15 @@ public class TestWebHDFS {
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
     Assert.assertTrue(webHdfs.exists(s1path));
 
+    // delete operation snapshot name as null
+    try {
+      webHdfs.deleteSnapshot(foo, null);
+      fail("Expected IllegalArgumentException");
+    } catch (RemoteException e) {
+      Assert.assertEquals("Required param snapshotname for "
+          + "op: DELETESNAPSHOT is null or empty", e.getLocalizedMessage());
+    }
+
     // delete the two snapshots
     webHdfs.deleteSnapshot(foo, "s1");
     assertFalse(webHdfs.exists(s1path));

@@ -585,6 +595,15 @@ public class TestWebHDFS {
     final Path s1path = SnapshotTestHelper.getSnapshotRoot(foo, "s1");
     Assert.assertTrue(webHdfs.exists(s1path));
 
+    // rename s1 to s2 with oldsnapshotName as null
+    try {
+      webHdfs.renameSnapshot(foo, null, "s2");
+      fail("Expected IllegalArgumentException");
+    } catch (RemoteException e) {
+      Assert.assertEquals("Required param oldsnapshotname for "
+          + "op: RENAMESNAPSHOT is null or empty", e.getLocalizedMessage());
+    }
+
     // rename s1 to s2
     webHdfs.renameSnapshot(foo, "s1", "s2");
     assertFalse(webHdfs.exists(s1path));

@@ -643,7 +662,7 @@ public class TestWebHDFS {
     try {
       cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
       final FileSystem webHdfs = WebHdfsTestUtil.getWebHdfsFileSystem(conf,
           WebHdfsConstants.WEBHDFS_SCHEME);
       Assert.assertNull(webHdfs.getDelegationToken(null));
     } finally {
       if (cluster != null) {