diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index e7af8dccdf7..4ae48814cc7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -81,6 +81,9 @@ Release 2.8.0 - UNRELEASED
 
     HDFS-8046. Allow better control of getContentSummary (kihwal)
 
+    HDFS-8076. Code cleanup for DFSInputStream: use offset instead of
+    LocatedBlock when possible. (Zhe Zhang via wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
index cf8015f0493..a9f274683c7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java
@@ -1045,16 +1045,16 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     return errMsgr.toString();
   }
 
-  private void fetchBlockByteRange(LocatedBlock block, long start, long end,
+  private void fetchBlockByteRange(long blockStartOffset, long start, long end,
       byte[] buf, int offset,
       Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
-    block = getBlockAt(block.getStartOffset());
+    LocatedBlock block = getBlockAt(blockStartOffset);
     while (true) {
       DNAddrPair addressPair = chooseDataNode(block, null);
       try {
-        actualGetFromOneDataNode(addressPair, block, start, end, buf, offset,
-            corruptedBlockMap);
+        actualGetFromOneDataNode(addressPair, blockStartOffset, start, end,
+            buf, offset, corruptedBlockMap);
         return;
       } catch (IOException e) {
         // Ignore. Already processed inside the function.
@@ -1064,7 +1064,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   }
 
   private Callable<ByteBuffer> getFromOneDataNode(final DNAddrPair datanode,
-      final LocatedBlock block, final long start, final long end,
+      final long blockStartOffset, final long start, final long end,
       final ByteBuffer bb,
       final Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap,
       final int hedgedReadId) {
@@ -1077,8 +1077,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         TraceScope scope =
             Trace.startSpan("hedgedRead" + hedgedReadId, parentSpan);
         try {
-          actualGetFromOneDataNode(datanode, block, start, end, buf, offset,
-              corruptedBlockMap);
+          actualGetFromOneDataNode(datanode, blockStartOffset, start, end, buf,
+              offset, corruptedBlockMap);
           return bb;
         } finally {
           scope.close();
@@ -1088,7 +1088,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
   }
 
   private void actualGetFromOneDataNode(final DNAddrPair datanode,
-      LocatedBlock block, final long start, final long end, byte[] buf,
+      long blockStartOffset, final long start, final long end, byte[] buf,
       int offset, Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
     DFSClientFaultInjector.get().startFetchFromDatanode();
@@ -1101,7 +1101,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       // start of the loop.
       CachingStrategy curCachingStrategy;
       boolean allowShortCircuitLocalReads;
-      block = getBlockAt(block.getStartOffset());
+      LocatedBlock block = getBlockAt(blockStartOffset);
       synchronized(infoLock) {
         curCachingStrategy = cachingStrategy;
         allowShortCircuitLocalReads = !shortCircuitForbidden();
@@ -1189,7 +1189,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
    * if the first read is taking longer than configured amount of
    * time. We then wait on which ever read returns first.
    */
-  private void hedgedFetchBlockByteRange(LocatedBlock block, long start,
+  private void hedgedFetchBlockByteRange(long blockStartOffset, long start,
       long end, byte[] buf, int offset,
       Map<ExtendedBlock, Set<DatanodeInfo>> corruptedBlockMap)
       throws IOException {
@@ -1201,7 +1201,7 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
     ByteBuffer bb = null;
     int len = (int) (end - start + 1);
     int hedgedReadId = 0;
-    block = getBlockAt(block.getStartOffset());
+    LocatedBlock block = getBlockAt(blockStartOffset);
     while (true) {
       // see HDFS-6591, this metric is used to verify/catch unnecessary loops
       hedgedReadOpsLoopNumForTesting++;
@@ -1213,8 +1213,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         chosenNode = chooseDataNode(block, ignored);
         bb = ByteBuffer.wrap(buf, offset, len);
         Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
-            chosenNode, block, start, end, bb, corruptedBlockMap,
-            hedgedReadId++);
+            chosenNode, block.getStartOffset(), start, end, bb,
+            corruptedBlockMap, hedgedReadId++);
         Future<ByteBuffer> firstRequest = hedgedService
             .submit(getFromDataNodeCallable);
         futures.add(firstRequest);
@@ -1251,8 +1251,8 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
         }
         bb = ByteBuffer.allocate(len);
         Callable<ByteBuffer> getFromDataNodeCallable = getFromOneDataNode(
-            chosenNode, block, start, end, bb, corruptedBlockMap,
-            hedgedReadId++);
+            chosenNode, block.getStartOffset(), start, end, bb,
+            corruptedBlockMap, hedgedReadId++);
         Future<ByteBuffer> oneMoreRequest = hedgedService
             .submit(getFromDataNodeCallable);
         futures.add(oneMoreRequest);
@@ -1405,11 +1405,13 @@ implements ByteBufferReadable, CanSetDropBehind, CanSetReadahead,
       long bytesToRead = Math.min(remaining, blk.getBlockSize() - targetStart);
       try {
         if (dfsClient.isHedgedReadsEnabled()) {
-          hedgedFetchBlockByteRange(blk, targetStart, targetStart + bytesToRead
-              - 1, buffer, offset, corruptedBlockMap);
+          hedgedFetchBlockByteRange(blk.getStartOffset(), targetStart,
+              targetStart + bytesToRead - 1, buffer, offset,
+              corruptedBlockMap);
         } else {
-          fetchBlockByteRange(blk, targetStart, targetStart + bytesToRead - 1,
-              buffer, offset, corruptedBlockMap);
+          fetchBlockByteRange(blk.getStartOffset(), targetStart,
+              targetStart + bytesToRead - 1, buffer, offset,
+              corruptedBlockMap);
         }
       } finally {
         // Check and report if any block replicas are corrupted.
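
The cleanup applies one pattern throughout: instead of threading a LocatedBlock through fetchBlockByteRange(), getFromOneDataNode(), actualGetFromOneDataNode(), and hedgedFetchBlockByteRange(), callers pass the block's start offset as a long, and each method re-resolves the current LocatedBlock via getBlockAt(blockStartOffset) at its point of use. Because chooseDataNode() and fetchBlockAt() can refresh the cached locations between retries, resolving from the offset means every loop iteration sees the latest block metadata rather than a reference captured before the refresh. Below is a minimal, self-contained sketch of that retry shape; BlockInfo, resolveBlockAt, and readOnce are hypothetical stand-ins, not HDFS classes.

// Minimal sketch of the offset-based retry pattern (hypothetical names,
// not HDFS API): callers pass only the block's start offset, and every
// attempt re-resolves fresh block metadata from that offset.
import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class OffsetBasedRetrySketch {

  /** Stand-in for LocatedBlock: an immutable snapshot of block metadata. */
  static class BlockInfo {
    final long startOffset;
    final String replicaAddress;
    BlockInfo(long startOffset, String replicaAddress) {
      this.startOffset = startOffset;
      this.replicaAddress = replicaAddress;
    }
  }

  /** Stand-in for the client's block cache; failover handling updates it. */
  private final Map<Long, BlockInfo> blockCache = new ConcurrentHashMap<>();

  /** Stand-in for getBlockAt(offset): always consults the current cache. */
  private BlockInfo resolveBlockAt(long blockStartOffset) throws IOException {
    BlockInfo block = blockCache.get(blockStartOffset);
    if (block == null) {
      throw new IOException("no block starts at offset " + blockStartOffset);
    }
    return block;
  }

  /**
   * Offset-based fetch, mirroring the shape of the patched
   * fetchBlockByteRange(): the block is a local variable re-resolved on
   * every attempt, so a location refresh between attempts is picked up
   * automatically. (Bounded here; the real loop retries until datanode
   * selection itself gives up.)
   */
  void fetchRange(long blockStartOffset, long start, long end)
      throws IOException {
    final int maxAttempts = 3;
    for (int attempt = 0; attempt < maxAttempts; attempt++) {
      BlockInfo block = resolveBlockAt(blockStartOffset); // fresh each pass
      try {
        readOnce(block, start, end);
        return;
      } catch (IOException e) {
        // A real client would mark block.replicaAddress dead and refresh
        // blockCache here, so the next resolveBlockAt() sees new replicas.
      }
    }
    throw new IOException("could not read range [" + start + ", " + end + "]");
  }

  /** Hypothetical single read attempt against one replica; body elided. */
  private void readOnce(BlockInfo block, long start, long end)
      throws IOException {
  }

  public static void main(String[] args) throws IOException {
    OffsetBasedRetrySketch sketch = new OffsetBasedRetrySketch();
    sketch.blockCache.put(0L, new BlockInfo(0L, "dn1:50010"));
    sketch.fetchRange(0L, 0L, 1023L); // succeeds: readOnce() is a no-op
  }
}

At the call sites the change is behavior-preserving: blk.getStartOffset() hands the callee the same offset it previously pulled out of the LocatedBlock itself, so only the ownership of the metadata lookup moves.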