svn merge -c 1587954 FIXES: HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1587955 13f79535-47bb-0310-9956-ffa450edef68
Daryn Sharp 2014-04-16 16:06:28 +00:00
parent 8f0bc218b6
commit 46738ee92b
3 changed files with 25 additions and 3 deletions

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -51,6 +51,8 @@ Release 2.5.0 - UNRELEASED
   OPTIMIZATIONS
 
+    HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
+
   BUG FIXES
 
     HDFS-6112. NFS Gateway docs are incorrect for allowed hosts configuration.

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java

@@ -430,10 +430,18 @@ public class DatanodeWebHdfsMethods {
         Math.min(length.getValue(), in.getVisibleLength() - offset.getValue()) :
         in.getVisibleLength() - offset.getValue();
 
+      // jetty 6 reserves 12 bytes in the out buffer for chunked responses
+      // (file length > 2GB) which causes extremely poor performance when
+      // 12 bytes of the output spill into another buffer which results
+      // in a big and little write
+      int outBufferSize = response.getBufferSize();
+      if (n > Integer.MAX_VALUE) {
+        outBufferSize -= 12;
+      }
+
       /**
        * Allow the Web UI to perform an AJAX request to get the data.
        */
-      return Response.ok(new OpenEntity(in, n, dfsclient))
+      return Response.ok(new OpenEntity(in, n, outBufferSize, dfsclient))
           .type(MediaType.APPLICATION_OCTET_STREAM)
           .header("Access-Control-Allow-Methods", "GET")
           .header("Access-Control-Allow-Origin", "*")

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OpenEntity.java

@@ -37,12 +37,14 @@ import org.apache.hadoop.io.IOUtils;
 public class OpenEntity {
   private final HdfsDataInputStream in;
   private final long length;
+  private final int outBufferSize;
   private final DFSClient dfsclient;
 
   OpenEntity(final HdfsDataInputStream in, final long length,
-      final DFSClient dfsclient) {
+      final int outBufferSize, final DFSClient dfsclient) {
     this.in = in;
     this.length = length;
+    this.outBufferSize = outBufferSize;
     this.dfsclient = dfsclient;
   }
@@ -71,7 +73,17 @@ public class OpenEntity {
       MultivaluedMap<String, Object> httpHeaders, OutputStream out
       ) throws IOException {
     try {
-      IOUtils.copyBytes(e.in, out, e.length, false);
+      byte[] buf = new byte[e.outBufferSize];
+      long remaining = e.length;
+      while (remaining > 0) {
+        int read = e.in.read(buf, 0, (int)Math.min(buf.length, remaining));
+        if (read == -1) { // EOF
+          break;
+        }
+        out.write(buf, 0, read);
+        out.flush();
+        remaining -= read;
+      }
     } finally {
       IOUtils.cleanup(DatanodeWebHdfsMethods.LOG, e.in);
       IOUtils.cleanup(DatanodeWebHdfsMethods.LOG, e.dfsclient);
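
For reference, the same copy loop as a self-contained sketch (a hypothetical helper, not the committed class): the point of the patch is that the copy buffer is sized to the container's adjusted output buffer rather than IOUtils.copyBytes's default buffer, so each write() and flush() maps onto exactly one chunk.

import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

// Sketch only: a standalone version of the loop added above.
public final class SizedCopy {
  static void copy(InputStream in, OutputStream out,
      long length, int bufferSize) throws IOException {
    byte[] buf = new byte[bufferSize];
    long remaining = length;
    while (remaining > 0) {
      int read = in.read(buf, 0, (int) Math.min(buf.length, remaining));
      if (read == -1) { // EOF before the expected length
        break;
      }
      out.write(buf, 0, read);
      out.flush(); // push each full buffer through the container
      remaining -= read;
    }
  }
}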