HDFS-6529. Trace logging for RemoteBlockReader2 to identify remote datanode and file being read. Contributed by Anubhav Dhoot.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1602539 13f79535-47bb-0310-9956-ffa450edef68
parent 9313bbd06e
commit 018793faa7
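The change itself is small: each RemoteBlockReader2.read() call is bracketed by a pair of TRACE-level messages tagged with a random UUID, and the messages are emitted only when TRACE is enabled for the class. A minimal sketch of turning that on, assuming the log4j 1.x backend that Hadoop 2.x uses behind commons-logging (the driver class below is illustrative, not part of this commit):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

// Illustrative only, not part of this commit: raise the log level for
// RemoteBlockReader2 so its new LOG.trace() calls actually produce output.
public class EnableReaderTrace {
  public static void main(String[] args) {
    // Programmatic equivalent of the log4j.properties line (assumption):
    //   log4j.logger.org.apache.hadoop.hdfs.RemoteBlockReader2=TRACE
    Logger.getLogger("org.apache.hadoop.hdfs.RemoteBlockReader2")
        .setLevel(Level.TRACE);
  }
}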
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -180,6 +180,9 @@ Release 2.5.0 - UNRELEASED
     HDFS-6470. TestBPOfferService.testBPInitErrorHandling is flaky.
     (Ming Ma via wang)
 
+    HDFS-6529. Trace logging for RemoteBlockReader2 to identify remote datanode
+    and file being read. (Anubhav Dhoot via atm)
+
   OPTIMIZATIONS
 
     HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java
@@ -26,6 +26,7 @@ import java.net.InetSocketAddress;
 import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 import java.util.EnumSet;
+import java.util.UUID;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -133,9 +134,22 @@ public class RemoteBlockReader2 implements BlockReader {
   public synchronized int read(byte[] buf, int off, int len)
                                throws IOException {
 
+    UUID randomId = null;
+    if (LOG.isTraceEnabled()) {
+      randomId = UUID.randomUUID();
+      LOG.trace(String.format("Starting read #%s file %s from datanode %s",
+          randomId.toString(), this.filename,
+          this.datanodeID.getHostName()));
+    }
+
     if (curDataSlice == null || curDataSlice.remaining() == 0 && bytesNeededToFinish > 0) {
       readNextPacket();
     }
+
+    if (LOG.isTraceEnabled()) {
+      LOG.trace(String.format("Finishing read #" + randomId));
+    }
+
     if (curDataSlice.remaining() == 0) {
       // we're at EOF now
       return -1;
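With TRACE enabled, every read is bracketed by a matched pair of lines joined by the shared UUID; the Finishing line logs only the id, so the datanode and file are recovered by pairing it with its Starting line. Hypothetical output, following the format strings above (the UUID, path, and hostname are made-up values, not from the source):

Starting read #6f1c0c4b-2a77-4d0a-9b1e-3d5e8a9c0f12 file /user/alice/part-00000 from datanode dn3.example.com
Finishing read #6f1c0c4b-2a77-4d0a-9b1e-3d5e8a9c0f12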