diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index 19dde1f0d3b..2940f35307c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1249,7 +1249,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         // chooseDataNode is a commitment. If no node, we go to
         // the NN to reget block locations. Only go here on first read.
         chosenNode = chooseDataNode(block, ignored);
-        bb = ByteBuffer.wrap(buf, offset, len);
+        bb = ByteBuffer.allocate(len);
         Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
             chosenNode, block, start, end, bb, corruptedBlockMap,
             hedgedReadId++);
@@ -1260,7 +1260,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
           Future<ByteBuffer> future = hedgedService.poll(
               dfsClient.getHedgedReadTimeout(), TimeUnit.MILLISECONDS);
           if (future != null) {
-            future.get();
+            ByteBuffer result = future.get();
+            System.arraycopy(result.array(), result.position(), buf, offset,
+                len);
             return;
           }
           if (DFSClient.LOG.isDebugEnabled()) {
@@ -1306,13 +1308,9 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
             ByteBuffer result = getFirstToComplete(hedgedService, futures);
             // cancel the rest.
             cancelAll(futures);
-            if (result.array() != buf) { // compare the array pointers
-              dfsClient.getHedgedReadMetrics().incHedgedReadWins();
-              System.arraycopy(result.array(), result.position(), buf, offset,
-                  len);
-            } else {
-              dfsClient.getHedgedReadMetrics().incHedgedReadOps();
-            }
+            dfsClient.getHedgedReadMetrics().incHedgedReadWins();
+            System.arraycopy(result.array(), result.position(), buf, offset,
+                len);
             return;
           } catch (InterruptedException ie) {
             // Ignore and retry
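
Taken together, the hunks change the buffer discipline of the hedged read path: the first attempt now fills a private `ByteBuffer.allocate(len)` buffer instead of a `ByteBuffer.wrap(buf, offset, len)` view of the caller's array, and whichever attempt completes first is copied back with an explicit `System.arraycopy`. Since `result` can no longer alias `buf`, the old array-pointer comparison in the third hunk is meaningless, which is presumably why the win metric is now incremented unconditionally on that path. Below is a minimal, self-contained sketch of this pattern, not Hadoop code; the class name `HedgedReadSketch`, the two simulated replica delays, and the fake payload are invented for illustration.

```java
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ExecutorCompletionService;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class HedgedReadSketch {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(2);
    CompletionService<ByteBuffer> hedgedService =
        new ExecutorCompletionService<>(pool);

    byte[] buf = new byte[8]; // caller's buffer -- never handed to an attempt
    int offset = 0;
    int len = buf.length;

    // Two hedged attempts against "replicas" of different speeds. Each
    // attempt fills its own freshly allocated buffer, mirroring
    // ByteBuffer.allocate(len) in the patch (vs. wrapping buf directly).
    List<Future<ByteBuffer>> futures = new ArrayList<>();
    for (long delayMs : new long[] { 200L, 20L }) {
      futures.add(hedgedService.submit(() -> {
        ByteBuffer bb = ByteBuffer.allocate(len);
        Thread.sleep(delayMs); // stand-in for a DataNode read
        for (int i = 0; i < len; i++) {
          bb.put((byte) ('a' + i));
        }
        bb.flip();
        return bb;
      }));
    }

    // Take whichever attempt finishes first and copy its private buffer
    // into the caller's array -- the unconditional arraycopy from the patch.
    ByteBuffer result = hedgedService.take().get();
    System.arraycopy(result.array(), result.position(), buf, offset, len);

    // Cancel the loser, as cancelAll(futures) does in the real code.
    for (Future<ByteBuffer> f : futures) {
      f.cancel(true);
    }
    pool.shutdownNow();

    System.out.println(new String(buf)); // "abcdefgh"
  }
}
```

The design point the sketch makes concrete: with a wrapped buffer, a slow losing attempt could in principle keep writing into `buf` after the winner's data had been copied out and the method had returned; giving every attempt a private buffer confines each write to memory only that attempt owns, at the cost of one extra allocation and copy per read.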