HDFS-9185. Fix null tracer in ErasureCodingWorker. Contributed by Rakesh R.

commit c6cafc77e6
parent 439f43ad3d
Author: Jing Zhao
Date:   2015-10-02 11:08:17 -07:00
7 changed files with 21 additions and 9 deletions

StripedBlockUtil.java

@@ -222,11 +222,14 @@ public static StripingChunkReadResult getNextCompletedStripedRead(
       }
     } catch (ExecutionException e) {
       if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("ExecutionException " + e);
+        DFSClient.LOG.debug("Exception during striped read task", e);
       }
       return new StripingChunkReadResult(futures.remove(future),
           StripingChunkReadResult.FAILED);
     } catch (CancellationException e) {
+      if (DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("Exception during striped read task", e);
+      }
       return new StripingChunkReadResult(futures.remove(future),
           StripingChunkReadResult.CANCELLED);
     }
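
Note: the logging change above swaps string concatenation for the two-argument debug() overload. A minimal standalone sketch, assuming commons-logging (which DFSClient.LOG used at the time; LogOverloadSketch is an illustrative class, not part of this patch), of why that matters: a Throwable passed as the second argument is logged with its full stack trace, while concatenation captures only its toString().

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class LogOverloadSketch {
  private static final Log LOG = LogFactory.getLog(LogOverloadSketch.class);

  public static void main(String[] args) {
    Exception e = new IllegalStateException("striped read task failed");
    // Message only: the stack trace is lost.
    LOG.debug("ExecutionException " + e);
    // Message plus Throwable: the logger prints the full stack trace.
    LOG.debug("Exception during striped read task", e);
  }
}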

CHANGES-HDFS-EC-7285.txt

@@ -453,3 +453,5 @@
     HDFS-9040. Erasure coding: coordinate data streamers in
     DFSStripedOutputStream. (jing9 and Walter Su)
+
+    HDFS-9185. Fix null tracer in ErasureCodingWorker. (Rakesh R via jing9)

BlockSender.java

@@ -707,7 +707,7 @@ public void verifyChecksum(final byte[] buf, final int dataOffset,
    */
   long sendBlock(DataOutputStream out, OutputStream baseStream,
       DataTransferThrottler throttler) throws IOException {
-    TraceScope scope = datanode.tracer.
+    final TraceScope scope = datanode.getTracer().
         newScope("sendBlock_" + block.getBlockId());
     try {
       return doSendBlock(out, baseStream, throttler);
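
The hunk above also shows the per-operation tracing pattern BlockSender follows: open a TraceScope from the DataNode's shared, non-null Tracer, do the work, close the scope. A minimal standalone sketch under the assumption that htrace-core4 (the tracing library Hadoop used at this point) is on the classpath; ScopeSketch and the scope name are illustrative only:

import org.apache.htrace.core.HTraceConfiguration;
import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

public class ScopeSketch {
  public static void main(String[] args) {
    // Build a tracer with empty configuration; a real deployment would
    // configure span receivers and sampling.
    Tracer tracer = new Tracer.Builder("ScopeSketch")
        .conf(HTraceConfiguration.EMPTY)
        .build();
    TraceScope scope = tracer.newScope("sendBlock_42");
    try {
      // ... traced work, e.g. doSendBlock(...) in BlockSender ...
    } finally {
      // Closing the scope ends the span. A null tracer would already have
      // thrown a NullPointerException at the newScope() call above.
      scope.close();
    }
  }
}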

DataNode.java

@@ -369,7 +369,7 @@ public static InetSocketAddress createSocketAddr(String target) {
   private boolean isPermissionEnabled;
   private String dnUserName = null;
   private ErasureCodingWorker ecWorker;
-  final Tracer tracer;
+  private final Tracer tracer;
   private final TracerConfigurationManager tracerConfigurationManager;
   private static final int NUM_CORES = Runtime.getRuntime()
       .availableProcessors();
@@ -3395,4 +3395,8 @@ protected void stopMetricsLogger() {
   ScheduledThreadPoolExecutor getMetricsLoggerTimer() {
     return metricsLoggerTimer;
   }
+
+  public Tracer getTracer() {
+    return tracer;
+  }
 }

DataXceiver.java

@@ -126,7 +126,7 @@ public static DataXceiver create(Peer peer, DataNode dn,
   private DataXceiver(Peer peer, DataNode datanode,
       DataXceiverServer dataXceiverServer) throws IOException {
-    super(datanode.tracer);
+    super(datanode.getTracer());
     this.peer = peer;
     this.dnConf = datanode.getDnConf();
     this.socketIn = peer.getInputStream();

ErasureCodingWorker.java

@@ -50,11 +50,9 @@
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSPacket;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.RemoteBlockReader2;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -818,7 +816,7 @@ private BlockReader newBlockReader(final ExtendedBlock block,
         "dummy", block, blockToken, offsetInBlock,
         block.getNumBytes() - offsetInBlock, true,
         "", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
-        null, cachingStrategy, null);
+        null, cachingStrategy, datanode.getTracer());
     } catch (IOException e) {
       return null;
     }
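
This hunk is the actual bug fix: ErasureCodingWorker constructed its block readers with a literal null tracer, so recovery work failed with a NullPointerException the first time a reader tried to open a trace scope; the new DataNode#getTracer() accessor lets it pass the real tracer instead. A self-contained sketch of the failure mode, using hypothetical SketchTracer/SketchReader stand-ins rather than the HDFS classes:

public class NullTracerSketch {
  /** Hypothetical stand-in for a tracer that hands out closeable scopes. */
  static class SketchTracer {
    AutoCloseable newScope(String name) {
      System.out.println("open scope: " + name);
      return () -> System.out.println("close scope: " + name);
    }
  }

  /** Hypothetical stand-in for a block reader with a tracer dependency. */
  static class SketchReader {
    private final SketchTracer tracer;

    SketchReader(SketchTracer tracer) { this.tracer = tracer; }

    void read() throws Exception {
      // A null dependency only explodes here, on the first traced read,
      // far from the constructor call that supplied it.
      try (AutoCloseable scope = tracer.newScope("read")) {
        // ... read bytes ...
      }
    }
  }

  public static void main(String[] args) throws Exception {
    new SketchReader(new SketchTracer()).read(); // works: non-null tracer
    new SketchReader(null).read();               // NullPointerException
  }
}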

TestRecoverStripedFile.java

@@ -38,7 +38,6 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -50,6 +49,8 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -65,6 +66,10 @@ public class TestRecoverStripedFile {
   private static final int groupSize = dataBlkNum + parityBlkNum;
   private static final int dnNum = groupSize + parityBlkNum;
 
+  static {
+    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
+  }
+
   private MiniDFSCluster cluster;
   private Configuration conf;
   private DistributedFileSystem fs;
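
The static block added above raises DFSClient logging to ALL before any test method runs, so the debug lines added in StripedBlockUtil actually appear in the test output. A minimal sketch of the same idea, assuming log4j 1.x as the backend (VerboseTestLogging and the direct Logger call are illustrative; the test itself goes through GenericTestUtils):

import org.apache.log4j.Level;
import org.apache.log4j.Logger;

public class VerboseTestLogging {
  static {
    // Runs once at class load, before any test method: capture all
    // DFSClient output, including debug-level stack traces.
    Logger.getLogger("org.apache.hadoop.hdfs.DFSClient").setLevel(Level.ALL);
  }

  public static void main(String[] args) {
    // ... exercise code whose DFSClient debug logs are now visible ...
  }
}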