HDFS-2540. Webhdfs: change "Expect: 100-continue" to two-step write; change "HdfsFileStatus" and "localName" respectively to "FileStatus" and "pathSuffix" in JSON response.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1199396 13f79535-47bb-0310-9956-ffa450edef68
Author: Tsz-wo Sze
Date:   2011-11-08 19:25:57 +00:00
Commit: 94c631af1f
Parent: 80cc8e9486

5 changed files with 104 additions and 25 deletions
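For context, the renames in this commit change the shape of WebHDFS JSON responses. A LISTSTATUS response now looks roughly like the sketch below; the wrapper keys ("FileStatuses"/"FileStatus") and the "pathSuffix" field come from the diffs in this commit, while the remaining fields and all values are illustrative only:

    {"FileStatuses":{"FileStatus":[
      {"pathSuffix":"a.txt","type":"FILE","length":1024,
       "owner":"alice","group":"supergroup","permission":"644",
       "accessTime":1320780000000,"modificationTime":1320780000000,
       "blockSize":67108864,"replication":3},
      {"pathSuffix":"bar","type":"DIRECTORY","length":0,
       "owner":"alice","group":"supergroup","permission":"755",
       "accessTime":0,"modificationTime":1320780000000,
       "blockSize":0,"replication":0}
    ]}}

Before this commit, the wrapper keys were "HdfsFileStatuses"/"HdfsFileStatus" and the name field was "localName".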

CHANGES.txt

@@ -116,18 +116,6 @@ Release 0.23.1 - UNRELEASED

   BUG FIXES

-    HDFS-2416. distcp with a webhdfs uri on a secure cluster fails. (jitendra)
-
-    HDFS-2527. WebHdfs: remove the use of "Range" header in Open; use ugi
-    username if renewer parameter is null in GetDelegationToken; response OK
-    when setting replication for non-files; rename GETFILEBLOCKLOCATIONS to
-    GET_BLOCK_LOCATIONS and state that it is a private unstable API; replace
-    isDirectory and isSymlink with enum {FILE, DIRECTORY, SYMLINK} in
-    HdfsFileStatus JSON object. (szetszwo)
-
-    HDFS-2528. Webhdfs: set delegation kind to WEBHDFS and add a HDFS token
-    when http requests are redirected to datanode. (szetszwo)
-
 Release 0.23.0 - 2011-11-01

   INCOMPATIBLE CHANGES
@@ -1284,6 +1272,22 @@ Release 0.23.0 - 2011-11-01
     HDFS-2065. Add null checks in DFSClient.getFileChecksum(..). (Uma
     Maheswara Rao G via szetszwo)

+    HDFS-2416. distcp with a webhdfs uri on a secure cluster fails. (jitendra)
+
+    HDFS-2527. WebHdfs: remove the use of "Range" header in Open; use ugi
+    username if renewer parameter is null in GetDelegationToken; response OK
+    when setting replication for non-files; rename GETFILEBLOCKLOCATIONS to
+    GET_BLOCK_LOCATIONS and state that it is a private unstable API; replace
+    isDirectory and isSymlink with enum {FILE, DIRECTORY, SYMLINK} in
+    HdfsFileStatus JSON object. (szetszwo)
+
+    HDFS-2528. Webhdfs: set delegation kind to WEBHDFS and add a HDFS token
+    when http requests are redirected to datanode. (szetszwo)
+
+    HDFS-2540. Webhdfs: change "Expect: 100-continue" to two-step write; change
+    "HdfsFileStatus" and "localName" respectively to "FileStatus" and
+    "pathSuffix" in JSON response. (szetszwo)
+
 BREAKDOWN OF HDFS-1073 SUBTASKS

   HDFS-1521. Persist transaction ID on disk between NN restarts.

NamenodeWebHdfsMethods.java

@@ -48,6 +48,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
@@ -577,8 +578,8 @@ public class NamenodeWebHdfsMethods {
       @Override
       public void write(final OutputStream outstream) throws IOException {
         final PrintStream out = new PrintStream(outstream);
-        out.println("{\"" + HdfsFileStatus.class.getSimpleName() + "es\":{\""
-            + HdfsFileStatus.class.getSimpleName() + "\":[");
+        out.println("{\"" + FileStatus.class.getSimpleName() + "es\":{\""
+            + FileStatus.class.getSimpleName() + "\":[");
         final HdfsFileStatus[] partial = first.getPartialListing();
         if (partial.length > 0) {

JsonUtil.java

@@ -28,6 +28,7 @@ import java.util.TreeMap;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -149,7 +150,7 @@ public class JsonUtil {
       return null;
     }
     final Map<String, Object> m = new TreeMap<String, Object>();
-    m.put("localName", status.getLocalName());
+    m.put("pathSuffix", status.getLocalName());
     m.put("type", PathType.valueOf(status));
     if (status.isSymlink()) {
       m.put("symlink", status.getSymlink());
@@ -163,8 +164,7 @@ public class JsonUtil {
     m.put("modificationTime", status.getModificationTime());
     m.put("blockSize", status.getBlockSize());
     m.put("replication", status.getReplication());
-    return includeType ? toJsonString(HdfsFileStatus.class, m) :
-        JSON.toString(m);
+    return includeType ? toJsonString(FileStatus.class, m): JSON.toString(m);
   }

   /** Convert a Json map to a HdfsFileStatus object. */
@@ -174,8 +174,8 @@ public class JsonUtil {
     }
     final Map<?, ?> m = includesType ?
-        (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName()) : json;
-    final String localName = (String) m.get("localName");
+        (Map<?, ?>)json.get(FileStatus.class.getSimpleName()) : json;
+    final String localName = (String) m.get("pathSuffix");
     final PathType type = PathType.valueOf((String) m.get("type"));
     final byte[] symlink = type != PathType.SYMLINK? null
         : DFSUtil.string2Bytes((String)m.get("symlink"));

WebHdfsFileSystem.java

@@ -334,14 +334,13 @@ public class WebHdfsFileSystem extends FileSystem
     final URL url = toUrl(op, fspath, parameters);

     //connect and get response
-    final HttpURLConnection conn = getHttpUrlConnection(url);
+    HttpURLConnection conn = getHttpUrlConnection(url);
     try {
       conn.setRequestMethod(op.getType().toString());
-      conn.setDoOutput(op.getDoOutput());
       if (op.getDoOutput()) {
-        conn.setRequestProperty("Expect", "100-Continue");
-        conn.setInstanceFollowRedirects(true);
+        conn = twoStepWrite(conn, op);
       }
+      conn.setDoOutput(op.getDoOutput());
       conn.connect();
       return conn;
     } catch (IOException e) {
@@ -349,6 +348,35 @@ public class WebHdfsFileSystem extends FileSystem
       throw e;
     }
   }

+  /**
+   * Two-step Create/Append:
+   * Step 1) Submit an HTTP request with neither auto-redirect nor data.
+   * Step 2) Submit another HTTP request with the URL from the Location
+   * header, this time with data.
+   *
+   * The reason for the two-step create/append is to prevent clients from
+   * sending out data before the redirect. This issue is addressed by the
+   * "Expect: 100-continue" header in HTTP/1.1; see RFC 2616, Section 8.2.3.
+   * Unfortunately, there are software library bugs (e.g. the Jetty 6 HTTP
+   * server and the Java 6 HTTP client) that do not correctly implement
+   * "Expect: 100-continue". The two-step create/append is a temporary
+   * workaround for these library bugs.
+   */
+  private static HttpURLConnection twoStepWrite(HttpURLConnection conn,
+      final HttpOpParam.Op op) throws IOException {
+    //Step 1) Submit an HTTP request with neither auto-redirect nor data.
+    conn.setInstanceFollowRedirects(false);
+    conn.setDoOutput(false);
+    conn.connect();
+    validateResponse(HttpOpParam.TemporaryRedirectOp.valueOf(op), conn);
+    final String redirect = conn.getHeaderField("Location");
+    conn.disconnect();
+
+    //Step 2) Open a new connection to the URL from the Location header.
+    conn = (HttpURLConnection)new URL(redirect).openConnection();
+    conn.setRequestMethod(op.getType().toString());
+    return conn;
+  }

   /**
    * Run a http operation.
@@ -626,8 +654,8 @@ public class WebHdfsFileSystem extends FileSystem
     final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
     final Map<?, ?> json = run(op, f);
-    final Map<?, ?> rootmap = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName() + "es");
-    final Object[] array = (Object[])rootmap.get(HdfsFileStatus.class.getSimpleName());
+    final Map<?, ?> rootmap = (Map<?, ?>)json.get(FileStatus.class.getSimpleName() + "es");
+    final Object[] array = (Object[])rootmap.get(FileStatus.class.getSimpleName());

     //convert FileStatus
     final FileStatus[] statuses = new FileStatus[array.length];
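For illustration, here is a minimal standalone sketch of the same two-step CREATE driven directly through java.net, outside this class; the namenode host/port, file path, and user name are hypothetical placeholders, and error handling is omitted:

    import java.io.OutputStream;
    import java.net.HttpURLConnection;
    import java.net.URL;

    public class TwoStepWriteSketch {
      public static void main(String[] args) throws Exception {
        // Step 1: submit the CREATE request with no data and auto-redirect
        // disabled; the namenode answers 307 Temporary Redirect and puts the
        // datanode URL in the "Location" header.
        final URL nn = new URL("http://namenode:50070/webhdfs/v1/tmp/test.txt"
            + "?op=CREATE&user.name=alice");
        HttpURLConnection conn = (HttpURLConnection)nn.openConnection();
        conn.setRequestMethod("PUT");
        conn.setInstanceFollowRedirects(false);
        conn.setDoOutput(false);
        conn.connect();
        final String redirect = conn.getHeaderField("Location");
        conn.disconnect();

        // Step 2: PUT the actual file bytes to the redirect URL, which
        // points at a datanode.
        conn = (HttpURLConnection)new URL(redirect).openConnection();
        conn.setRequestMethod("PUT");
        conn.setDoOutput(true);
        final OutputStream out = conn.getOutputStream();
        out.write("hello, webhdfs".getBytes("UTF-8"));
        out.close();
        System.out.println("HTTP " + conn.getResponseCode()); // expect 201
      }
    }

The twoStepWrite method added above performs step 1 and prepares the step-2 connection; the caller then writes the data and validates the final response.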

HttpOpParam.java

@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.hdfs.web.resources;

+import javax.ws.rs.core.Response;
+
 /** Http operation parameter. */
 public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
     extends EnumParam<E> {
@@ -46,6 +49,49 @@ public abstract class HttpOpParam<E extends Enum<E> & HttpOpParam.Op>
     public String toQueryString();
   }

+  /** Expects HTTP response 307 "Temporary Redirect". */
+  public static class TemporaryRedirectOp implements Op {
+    static final TemporaryRedirectOp CREATE = new TemporaryRedirectOp(
+        PutOpParam.Op.CREATE);
+    static final TemporaryRedirectOp APPEND = new TemporaryRedirectOp(
+        PostOpParam.Op.APPEND);
+
+    /** Get an object for the given op. */
+    public static TemporaryRedirectOp valueOf(final Op op) {
+      if (op == CREATE.op) {
+        return CREATE;
+      } else if (op == APPEND.op) {
+        return APPEND;
+      }
+      throw new IllegalArgumentException(op + " not found.");
+    }
+
+    private final Op op;
+
+    private TemporaryRedirectOp(final Op op) {
+      this.op = op;
+    }
+
+    @Override
+    public Type getType() {
+      return op.getType();
+    }
+
+    @Override
+    public boolean getDoOutput() {
+      return op.getDoOutput();
+    }
+
+    /** Override the original expected response with "Temporary Redirect". */
+    @Override
+    public int getExpectedHttpResponseCode() {
+      return Response.Status.TEMPORARY_REDIRECT.getStatusCode();
+    }
+
+    @Override
+    public String toQueryString() {
+      return op.toQueryString();
+    }
+  }
+
   HttpOpParam(final Domain<E> domain, final E value) {
     super(domain, value);
   }