HDFS-2432. Webhdfs: response FORBIDDEN when setReplication on non-files; clear umask before creating a file; throw IllegalArgumentException if setOwner with both owner and group empty; throw FileNotFoundException if getFileStatus on non-existing files; fix bugs in getBlockLocations; and changed getFileChecksum json response root to "FileChecksum".

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1190077 13f79535-47bb-0310-9956-ffa450edef68
commit 8cb0d4b380
parent 221aadbc5b
Author: Tsz-wo Sze
Date: 2011-10-27 23:13:26 +00:00
6 changed files with 104 additions and 33 deletions

CHANGES.txt

@@ -1208,6 +1208,12 @@ Release 0.23.0 - Unreleased
     HDFS-2494. Close the streams and DFSClient in DatanodeWebHdfsMethods.
     (Uma Maheswara Rao G via szetszwo)
 
+    HDFS-2432. Webhdfs: response FORBIDDEN when setReplication on non-files;
+    clear umask before creating a file; throw IllegalArgumentException if
+    setOwner with both owner and group empty; throw FileNotFoundException if
+    getFileStatus on non-existing files; fix bugs in getBlockLocations; and
+    changed getFileChecksum json response root to "FileChecksum". (szetszwo)
+
 BREAKDOWN OF HDFS-1073 SUBTASKS
 
     HDFS-1521. Persist transaction ID on disk between NN restarts.

DatanodeWebHdfsMethods.java

@@ -48,6 +48,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -152,6 +153,8 @@ public class DatanodeWebHdfsMethods {
     {
       final Configuration conf = new Configuration(datanode.getConf());
       final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
+      conf.set(FsPermission.UMASK_LABEL, "000");
+
       final int b = bufferSize.getValue(conf);
       DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
       FSDataOutputStream out = null;
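The permission sent with a webhdfs create request has already had the client's umask applied (see applyUMask in WebHdfsFileSystem further down), so the datanode presumably clears its own umask before handing the request to DFSClient; otherwise the site umask would be applied a second time. A minimal sketch of the effect, with hypothetical values (UMASK_LABEL is the configuration key for the client umask used in the hunk above):

    Configuration conf = new Configuration();
    conf.set(FsPermission.UMASK_LABEL, "000");
    FsPermission requested = new FsPermission((short) 0755);   // what the client asked for
    FsPermission effective = requested.applyUMask(FsPermission.getUMask(conf));
    // effective stays 0755; with a site umask such as "077" the group/other
    // bits would otherwise have been silently stripped on the datanode.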
@@ -307,12 +310,12 @@ public class DatanodeWebHdfsMethods {
     final DataNode datanode = (DataNode)context.getAttribute("datanode");
     final Configuration conf = new Configuration(datanode.getConf());
     final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
-    final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
 
     switch(op.getValue()) {
     case OPEN:
     {
       final int b = bufferSize.getValue(conf);
+      final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
       DFSDataInputStream in = null;
       try {
         in = new DFSClient.DFSDataInputStream(
@@ -355,13 +358,13 @@ public class DatanodeWebHdfsMethods {
     case GETFILECHECKSUM:
     {
       MD5MD5CRC32FileChecksum checksum = null;
-      DFSClient client = dfsclient;
+      DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
       try {
-        checksum = client.getFileChecksum(fullpath);
-        client.close();
-        client = null;
+        checksum = dfsclient.getFileChecksum(fullpath);
+        dfsclient.close();
+        dfsclient = null;
       } finally {
-        IOUtils.cleanup(LOG, client);
+        IOUtils.cleanup(LOG, dfsclient);
       }
       final String js = JsonUtil.toJsonString(checksum);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();

NamenodeWebHdfsMethods.java

@@ -42,7 +42,9 @@ import javax.ws.rs.QueryParam;
 import javax.ws.rs.core.Context;
 import javax.ws.rs.core.MediaType;
 import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
 import javax.ws.rs.core.StreamingOutput;
+import javax.ws.rs.core.Response.Status;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -318,10 +320,15 @@ public class NamenodeWebHdfsMethods {
     {
       final boolean b = np.setReplication(fullpath, replication.getValue(conf));
       final String js = JsonUtil.toJsonString("boolean", b);
-      return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+      final ResponseBuilder r = b? Response.ok(): Response.status(Status.FORBIDDEN);
+      return r.entity(js).type(MediaType.APPLICATION_JSON).build();
     }
     case SETOWNER:
     {
+      if (owner.getValue() == null && group.getValue() == null) {
+        throw new IllegalArgumentException("Both owner and group are empty.");
+      }
+
       np.setOwner(fullpath, owner.getValue(), group.getValue());
       return Response.ok().type(MediaType.APPLICATION_JSON).build();
     }
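When setReplication is applied to a directory, the namenode still returns the usual {"boolean": false} body, but now with status 403 FORBIDDEN instead of 200. Combined with the client changes in WebHdfsFileSystem below (validateResponse/run), the caller simply sees a false return value. A hedged sketch of the observable behaviour; the directory path is hypothetical and the FileSystem is assumed to be a WebHdfsFileSystem bound to a running cluster:

    // Server: 403 FORBIDDEN with body {"boolean": false}.
    // Client: validateResponse() finds no RemoteException in the body and hands
    // the parsed map back to run(), so setReplication() returns false here
    // instead of throwing, matching assertFalse(...) in the new test below.
    static boolean setReplicationOnDirectory(FileSystem webhdfs) throws IOException {
      final Path dir = new Path("/some/dir");          // hypothetical directory
      return webhdfs.setReplication(dir, (short) 1);   // false for a directory
    }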
@@ -487,13 +494,17 @@ public class NamenodeWebHdfsMethods {
       final long offsetValue = offset.getValue();
       final Long lengthValue = length.getValue();
       final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
-          offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
+          offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE);
       final String js = JsonUtil.toJsonString(locatedblocks);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETFILESTATUS:
     {
       final HdfsFileStatus status = np.getFileInfo(fullpath);
+      if (status == null) {
+        throw new FileNotFoundException("File does not exist: " + fullpath);
+      }
+
       final String js = JsonUtil.toJsonString(status, true);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
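Two separate fixes in this hunk. For GETFILEBLOCKLOCATIONS, the default length (used when the client omits the length parameter) was offsetValue + 1, an arbitrary window that depends on the offset; it is now Long.MAX_VALUE, i.e. "from the offset to the end of the file". For GETFILESTATUS, getFileInfo returns null for a missing path, which is now translated into FileNotFoundException (and hence a 404) rather than serializing a null status. A small sketch of the default-length change, with hypothetical request values:

    long offsetValue = 4096L;          // locations requested starting at 4 KB
    Long lengthValue = null;           // no length supplied by the client
    long oldDefault = offsetValue + 1;    // old fallback: a 4097-byte window tied to the offset
    long newDefault = Long.MAX_VALUE;     // new fallback: everything up to end of file
    // np.getBlockLocations(fullpath, offsetValue, newDefault) now covers all remaining blocks.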

JsonUtil.java

@@ -27,6 +27,7 @@ import java.util.Map;
 import java.util.TreeMap;
 
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -334,7 +335,7 @@ public class JsonUtil {
     } else {
       final Object[] a = new Object[array.size()];
       for(int i = 0; i < array.size(); i++) {
-        a[i] = toJsonMap(array.get(0));
+        a[i] = toJsonMap(array.get(i));
       }
       return a;
     }
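The old loop copied element 0 into every slot, so any JSON array built this way (for example a list of located blocks) ended up as N copies of its first entry. A self-contained illustration with plain strings rather than HDFS types:

    import java.util.Arrays;
    import java.util.List;

    public class ArrayIndexBugDemo {
      public static void main(String[] args) {
        final List<String> array = Arrays.asList("a", "b", "c");
        final Object[] buggy = new Object[array.size()];
        final Object[] fixed = new Object[array.size()];
        for (int i = 0; i < array.size(); i++) {
          buggy[i] = array.get(0);   // old code: always the first element
          fixed[i] = array.get(i);   // fixed code: the i-th element
        }
        System.out.println(Arrays.toString(buggy));  // [a, a, a]
        System.out.println(Arrays.toString(fixed));  // [a, b, c]
      }
    }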
@@ -436,7 +437,7 @@ public class JsonUtil {
     m.put("algorithm", checksum.getAlgorithmName());
     m.put("length", checksum.getLength());
     m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
-    return toJsonString(MD5MD5CRC32FileChecksum.class, m);
+    return toJsonString(FileChecksum.class, m);
   }
 
   /** Convert a Json map to a MD5MD5CRC32FileChecksum. */
@@ -446,8 +447,7 @@ public class JsonUtil {
       return null;
     }
 
-    final Map<?, ?> m = (Map<?, ?>)json.get(
-        MD5MD5CRC32FileChecksum.class.getSimpleName());
+    final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName());
     final String algorithm = (String)m.get("algorithm");
     final int length = (int)(long)(Long)m.get("length");
     final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
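Taken together, the two hunks above rename the JSON root of the checksum payload from the concrete class name to the FileChecksum interface name, and the parser reads it back from the same key. A hedged sketch of the assumed wire format; the algorithm string, length and bytes below are illustrative only, not taken from a real cluster:

    final String root = FileChecksum.class.getSimpleName();     // "FileChecksum"
    final Map<String, Object> m = new TreeMap<String, Object>();
    m.put("algorithm", "MD5-of-262144MD5-of-512CRC32");         // made-up example value
    m.put("length", 28L);
    m.put("bytes", "abcd...");                                   // hex string, truncated here
    // Serialized roughly as: {"FileChecksum":{"algorithm":"...","length":28,"bytes":"abcd..."}}
    // toMD5MD5CRC32FileChecksum() looks the map up under json.get(root), so the
    // writer and the reader can no longer disagree on the root name.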

WebHdfsFileSystem.java

@@ -154,19 +154,18 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     return f.isAbsolute()? f: new Path(workingDir, f);
   }
 
-  @SuppressWarnings("unchecked")
-  private static <T> T jsonParse(final InputStream in) throws IOException {
+  private static Map<?, ?> jsonParse(final InputStream in) throws IOException {
     if (in == null) {
       throw new IOException("The input stream is null.");
     }
-    return (T)JSON.parse(new InputStreamReader(in));
+    return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
   }
 
-  private static void validateResponse(final HttpOpParam.Op op,
+  private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
       final HttpURLConnection conn) throws IOException {
     final int code = conn.getResponseCode();
     if (code != op.getExpectedHttpResponseCode()) {
-      final Map<String, Object> m;
+      final Map<?, ?> m;
       try {
         m = jsonParse(conn.getErrorStream());
       } catch(IOException e) {
@@ -175,6 +174,10 @@ public class WebHdfsFileSystem extends HftpFileSystem {
             + ", message=" + conn.getResponseMessage(), e);
       }
 
+      if (m.get(RemoteException.class.getSimpleName()) == null) {
+        return m;
+      }
+
       final RemoteException re = JsonUtil.toRemoteException(m);
       throw re.unwrapRemoteException(AccessControlException.class,
           DSQuotaExceededException.class,
@@ -185,6 +188,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
           NSQuotaExceededException.class,
           UnresolvedPathException.class);
     }
+    return null;
   }
 
   URL toUrl(final HttpOpParam.Op op, final Path fspath,
@@ -235,15 +239,15 @@ public class WebHdfsFileSystem extends HftpFileSystem {
    * @param op http operation
    * @param fspath file system path
    * @param parameters parameters for the operation
-   * @return a JSON object, e.g. Object[], Map<String, Object>, etc.
+   * @return a JSON object, e.g. Object[], Map<?, ?>, etc.
    * @throws IOException
    */
-  private <T> T run(final HttpOpParam.Op op, final Path fspath,
+  private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
       final Param<?,?>... parameters) throws IOException {
     final HttpURLConnection conn = httpConnect(op, fspath, parameters);
-    validateResponse(op, conn);
     try {
-      return WebHdfsFileSystem.<T>jsonParse(conn.getInputStream());
+      final Map<?, ?> m = validateResponse(op, conn);
+      return m != null? m: jsonParse(conn.getInputStream());
     } finally {
       conn.disconnect();
     }
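This is the client-side half of the FORBIDDEN change: validateResponse now returns null when the status code is the expected one, returns the parsed error body when the status is unexpected but the body carries no RemoteException, and throws the unwrapped remote exception otherwise; run() then uses whichever JSON map it got. The control flow, restated from the hunk above:

    final Map<?, ?> m = validateResponse(op, conn);
    // m == null  -> expected status; parse the normal response from the input stream
    // m != null  -> unexpected status, but a plain JSON body (e.g. {"boolean": false})
    //               that is handed back to the caller as the result
    return m != null ? m : jsonParse(conn.getInputStream());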
@@ -258,7 +262,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
 
   private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
-    final Map<String, Object> json = run(op, f);
+    final Map<?, ?> json = run(op, f);
     final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
     if (status == null) {
       throw new FileNotFoundException("File does not exist: " + f);
@@ -284,7 +288,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
   public boolean mkdirs(Path f, FsPermission permission) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
-    final Map<String, Object> json = run(op, f,
+    final Map<?, ?> json = run(op, f,
         new PermissionParam(applyUMask(permission)));
     return (Boolean)json.get("boolean");
   }
@@ -293,7 +297,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
   public boolean rename(final Path src, final Path dst) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.RENAME;
-    final Map<String, Object> json = run(op, src,
+    final Map<?, ?> json = run(op, src,
         new DestinationParam(makeQualified(dst).toUri().getPath()));
     return (Boolean)json.get("boolean");
   }
@@ -333,8 +337,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
       ) throws IOException {
     statistics.incrementWriteOps(1);
     final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
-    final Map<String, Object> json = run(op, p,
-        new ReplicationParam(replication));
+    final Map<?, ?> json = run(op, p, new ReplicationParam(replication));
     return (Boolean)json.get("boolean");
   }
@@ -403,7 +406,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
   @Override
   public boolean delete(Path f, boolean recursive) throws IOException {
     final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
-    final Map<String, Object> json = run(op, f, new RecursiveParam(recursive));
+    final Map<?, ?> json = run(op, f, new RecursiveParam(recursive));
     return (Boolean)json.get("boolean");
   }
@@ -428,8 +431,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     //convert FileStatus
     final FileStatus[] statuses = new FileStatus[array.length];
     for(int i = 0; i < array.length; i++) {
-      @SuppressWarnings("unchecked")
-      final Map<String, Object> m = (Map<String, Object>)array[i];
+      final Map<?, ?> m = (Map<?, ?>)array[i];
       statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
     }
     return statuses;
@@ -439,7 +441,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
   public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
       ) throws IOException {
     final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
-    final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
+    final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
     final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
     token.setService(new Text(getCanonicalServiceName()));
     return token;
@@ -467,7 +469,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     statistics.incrementReadOps(1);
     final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
-    final Map<String, Object> m = run(op, p, new OffsetParam(offset),
+    final Map<?, ?> m = run(op, p, new OffsetParam(offset),
         new LengthParam(length));
     return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
   }
@@ -477,7 +479,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     statistics.incrementReadOps(1);
     final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
-    final Map<String, Object> m = run(op, p);
+    final Map<?, ?> m = run(op, p);
     return JsonUtil.toContentSummary(m);
   }
@@ -487,7 +489,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
     statistics.incrementReadOps(1);
     final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
-    final Map<String, Object> m = run(op, p);
+    final Map<?, ?> m = run(op, p);
     return JsonUtil.toMD5MD5CRC32FileChecksum(m);
   }
 }
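The remaining hunks are mechanical: every caller of run() switches from Map<String, Object> to the wildcard Map<?, ?>, which is all the JSON parser can actually guarantee and which removes the @SuppressWarnings("unchecked") casts. A general-Java illustration of the difference (not specific to this file):

    Object parsed = JSON.parse(new InputStreamReader(in));   // parser yields an untyped structure
    Map<?, ?> m = (Map<?, ?>) parsed;              // checked cast, no unchecked warning
    Boolean b = (Boolean) m.get("boolean");        // individual values are cast where used
    // Casting to Map<String, Object> instead would only compile under
    // @SuppressWarnings("unchecked"), since element types cannot be checked at runtime.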

TestWebHdfsFileSystemContract.java

@@ -27,6 +27,8 @@ import java.net.URI;
 import java.net.URL;
 import java.security.PrivilegedExceptionAction;
 
+import javax.servlet.http.HttpServletResponse;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -39,6 +41,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
 import org.apache.hadoop.hdfs.web.resources.PutOpParam;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -257,4 +260,50 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       WebHdfsFileSystem.LOG.info("This is expected.", e);
     }
   }
+
+  public void testResponseCode() throws IOException {
+    final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+    final Path dir = new Path("/test/testUrl");
+    assertTrue(webhdfs.mkdirs(dir));
+
+    {//test set owner with empty parameters
+      final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
+      conn.disconnect();
+    }
+
+    {//test set replication on a directory
+      final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
+      final URL url = webhdfs.toUrl(op, dir);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
+
+      assertFalse(webhdfs.setReplication(dir, (short)1));
+      conn.disconnect();
+    }
+
+    {//test get file status for a non-exist file.
+      final Path p = new Path(dir, "non-exist");
+      final URL url = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, p);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_NOT_FOUND, conn.getResponseCode());
+      conn.disconnect();
+    }
+
+    {//test set permission with empty parameters
+      final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
+      final URL url = webhdfs.toUrl(op, dir);
+      final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+      conn.setRequestMethod(op.getType().toString());
+      conn.connect();
+      assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+
+      assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
+      conn.disconnect();
+    }
+  }
 }