HDFS-9185. Fix null tracer in ErasureCodingWorker. Contributed by Rakesh R.

Jing Zhao 2015-10-02 11:08:17 -07:00
parent 439f43ad3d
commit c6cafc77e6
7 changed files with 21 additions and 9 deletions

StripedBlockUtil.java

@@ -222,11 +222,14 @@ public class StripedBlockUtil {
         }
       } catch (ExecutionException e) {
         if (DFSClient.LOG.isDebugEnabled()) {
-          DFSClient.LOG.debug("ExecutionException " + e);
+          DFSClient.LOG.debug("Exception during striped read task", e);
         }
         return new StripingChunkReadResult(futures.remove(future),
             StripingChunkReadResult.FAILED);
       } catch (CancellationException e) {
+        if (DFSClient.LOG.isDebugEnabled()) {
+          DFSClient.LOG.debug("Exception during striped read task", e);
+        }
         return new StripingChunkReadResult(futures.remove(future),
             StripingChunkReadResult.CANCELLED);
       }
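Besides rewording, the new debug call changes how the exception is emitted: concatenating e into the message logs only e.toString(), while passing it as a separate second argument lets the logging framework print the full stack trace. A minimal sketch of the difference, assuming DFSClient.LOG is a commons-logging Log as it was at the time (LoggingSketch is a hypothetical class name):

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

class LoggingSketch {
  private static final Log LOG = LogFactory.getLog(LoggingSketch.class);

  static void demo(Exception e) {
    // Old style: logs only e.toString(), the stack trace is lost.
    LOG.debug("ExecutionException " + e);
    // New style: logs the message plus the full stack trace of e.
    LOG.debug("Exception during striped read task", e);
  }
}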

CHANGES-HDFS-EC-7285.txt

@@ -453,3 +453,5 @@
     HDFS-9040. Erasure coding: coordinate data streamers in
     DFSStripedOutputStream. (jing9 and Walter Su)
+
+    HDFS-9185. Fix null tracer in ErasureCodingWorker. (Rakesh R via jing9)

BlockSender.java

@@ -707,7 +707,7 @@ class BlockSender implements java.io.Closeable {
    */
   long sendBlock(DataOutputStream out, OutputStream baseStream,
                  DataTransferThrottler throttler) throws IOException {
-    TraceScope scope = datanode.tracer.
-        newScope("sendBlock_" + block.getBlockId());
+    final TraceScope scope = datanode.getTracer().
+        newScope("sendBlock_" + block.getBlockId());
     try {
       return doSendBlock(out, baseStream, throttler);
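The hunk is cut off before the scope is released, but a TraceScope must be closed when the operation finishes or its span never ends. A sketch of the surrounding idiom, assuming the htrace 4 API (org.apache.htrace.core) that Hadoop used at the time; SendSketch and doWork are hypothetical stand-ins for BlockSender and doSendBlock:

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class SendSketch {
  long send(Tracer tracer, long blockId) {
    final TraceScope scope = tracer.newScope("sendBlock_" + blockId);
    try {
      return doWork(); // doSendBlock(...) in the real method
    } finally {
      scope.close(); // end the span even when doWork() throws
    }
  }

  private long doWork() { return 0L; }
}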

DataNode.java

@@ -369,7 +369,7 @@ public class DataNode extends ReconfigurableBase
   private boolean isPermissionEnabled;
   private String dnUserName = null;
   private ErasureCodingWorker ecWorker;
-  final Tracer tracer;
+  private final Tracer tracer;
   private final TracerConfigurationManager tracerConfigurationManager;
   private static final int NUM_CORES = Runtime.getRuntime()
       .availableProcessors();
@@ -3395,4 +3395,8 @@ public class DataNode extends ReconfigurableBase
   ScheduledThreadPoolExecutor getMetricsLoggerTimer() {
     return metricsLoggerTimer;
   }
+
+  public Tracer getTracer() {
+    return tracer;
+  }
 }
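Narrowing tracer to private and adding getTracer() is what lets code outside this class, and outside this package, reach the tracer safely: the old package-private field was presumably invisible to ErasureCodingWorker, which lives in a subpackage of the datanode package. A two-file sketch of that visibility rule, with hypothetical packages a and b:

// File a/Node.java
package a;
public class Node {
  final Object tracer = new Object();          // package-private: readable only within package a
  public Object getTracer() { return tracer; } // readable from any package
}

// File b/Worker.java
package b;
class Worker {
  Object tracerOf(a.Node n) {
    // return n.tracer;   // does not compile: tracer is not visible in package b
    return n.getTracer();
  }
}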

DataXceiver.java

@@ -126,7 +126,7 @@ class DataXceiver extends Receiver implements Runnable {
   private DataXceiver(Peer peer, DataNode datanode,
       DataXceiverServer dataXceiverServer) throws IOException {
-    super(datanode.tracer);
+    super(datanode.getTracer());
     this.peer = peer;
     this.dnConf = datanode.getDnConf();
     this.socketIn = peer.getInputStream();

ErasureCodingWorker.java

@@ -50,11 +50,9 @@ import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSPacket;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.RemoteBlockReader2;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -818,7 +816,7 @@ public final class ErasureCodingWorker {
           "dummy", block, blockToken, offsetInBlock,
           block.getNumBytes() - offsetInBlock, true,
           "", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
-          null, cachingStrategy, null);
+          null, cachingStrategy, datanode.getTracer());
     } catch (IOException e) {
       return null;
     }
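This hunk is the fix the commit title names: the erasure-coding read path built its block reader with a null tracer, and the reader dereferences that tracer during reads, so recovery reads failed with a NullPointerException. Passing datanode.getTracer() supplies a real instance. A minimal sketch of the failure mode with a hypothetical ReaderSketch class, again assuming the htrace 4 API:

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class ReaderSketch {
  private final Tracer tracer;

  ReaderSketch(Tracer tracer) { this.tracer = tracer; } // null here is a latent bug

  int read() {
    // Throws NullPointerException when tracer == null -- the crash this commit
    // removes by passing datanode.getTracer() instead of null.
    try (TraceScope scope = tracer.newScope("read")) {
      return 0; // actual byte transfer elided
    }
  }
}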

TestRecoverStripedFile.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -50,6 +49,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -64,7 +65,11 @@ public class TestRecoverStripedFile {
   private static final int blockSize = cellSize * 3;
   private static final int groupSize = dataBlkNum + parityBlkNum;
   private static final int dnNum = groupSize + parityBlkNum;
+
+  static {
+    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
+  }

   private MiniDFSCluster cluster;
   private Configuration conf;
   private DistributedFileSystem fs;