HDFS-9185. Fix null tracer in ErasureCodingWorker. Contributed by Rakesh R.
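In short: ErasureCodingWorker created its remote block readers with a null Tracer, which fails with a NullPointerException as soon as a reader opens a trace scope, and BlockSender/DataXceiver reached directly into DataNode's package-private tracer field. The diff below makes that field private, adds DataNode#getTracer(), and threads the real tracer through every call site. An illustrative snippet, not part of the commit (class and method names are hypothetical; htrace-4 API assumed):

import org.apache.htrace.core.TraceScope;
import org.apache.htrace.core.Tracer;

class NullTracerDemo {
  // RemoteBlockReader2 does the equivalent of this around each traced read.
  static void read(Tracer tracer) {
    TraceScope scope = tracer.newScope("read"); // NullPointerException when tracer == null
    try {
      // ... read one packet ...
    } finally {
      scope.close();
    }
  }

  public static void main(String[] args) {
    read(null); // reproduces the bug the commit title describes
  }
}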
commit c6cafc77e6
parent 439f43ad3d
@@ -222,11 +222,14 @@ public static StripingChunkReadResult getNextCompletedStripedRead(
       }
     } catch (ExecutionException e) {
       if (DFSClient.LOG.isDebugEnabled()) {
-        DFSClient.LOG.debug("ExecutionException " + e);
+        DFSClient.LOG.debug("Exception during striped read task", e);
       }
       return new StripingChunkReadResult(futures.remove(future),
           StripingChunkReadResult.FAILED);
     } catch (CancellationException e) {
+      if (DFSClient.LOG.isDebugEnabled()) {
+        DFSClient.LOG.debug("Exception during striped read task", e);
+      }
       return new StripingChunkReadResult(futures.remove(future),
           StripingChunkReadResult.CANCELLED);
     }
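For context, a hedged sketch of how a caller might consume these results (getNextCompletedStripedRead and the state constants are from StripedBlockUtil as shown above; readService, futures, and the branch bodies are assumed):

// Poll one completed striped read and branch on its state.
StripingChunkReadResult r =
    StripedBlockUtil.getNextCompletedStripedRead(readService, futures, 0);
if (r.state == StripingChunkReadResult.SUCCESSFUL) {
  // chunk r.index arrived intact
} else if (r.state == StripingChunkReadResult.FAILED) {
  // the read task threw: schedule a substitute read for chunk r.index
} else if (r.state == StripingChunkReadResult.CANCELLED) {
  // the task was cancelled; with this patch the cause is debug-logged too
}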
@@ -453,3 +453,5 @@
 
     HDFS-9040. Erasure coding: coordinate data streamers in
     DFSStripedOutputStream. (jing9 and Walter Su)
+
+    HDFS-9185. Fix null tracer in ErasureCodingWorker. (Rakesh R via jing9)
@@ -707,7 +707,7 @@ public void verifyChecksum(final byte[] buf, final int dataOffset,
    */
   long sendBlock(DataOutputStream out, OutputStream baseStream,
                  DataTransferThrottler throttler) throws IOException {
-    TraceScope scope = datanode.tracer.
+    final TraceScope scope = datanode.getTracer().
         newScope("sendBlock_" + block.getBlockId());
     try {
       return doSendBlock(out, baseStream, throttler);
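The hunk above ends mid-method; the idiomatic htrace pattern, and presumably what the surrounding BlockSender code already does, is to close the scope in a finally block so the span ends even on error:

final TraceScope scope = datanode.getTracer().
    newScope("sendBlock_" + block.getBlockId());
try {
  return doSendBlock(out, baseStream, throttler);
} finally {
  scope.close(); // ends the trace span even if doSendBlock throws
}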
@@ -369,7 +369,7 @@ public static InetSocketAddress createSocketAddr(String target) {
   private boolean isPermissionEnabled;
   private String dnUserName = null;
   private ErasureCodingWorker ecWorker;
-  final Tracer tracer;
+  private final Tracer tracer;
   private final TracerConfigurationManager tracerConfigurationManager;
   private static final int NUM_CORES = Runtime.getRuntime()
       .availableProcessors();
@@ -3395,4 +3395,8 @@ protected void stopMetricsLogger() {
   ScheduledThreadPoolExecutor getMetricsLoggerTimer() {
     return metricsLoggerTimer;
   }
+
+  public Tracer getTracer() {
+    return tracer;
+  }
 }
@@ -126,7 +126,7 @@ public static DataXceiver create(Peer peer, DataNode dn,
 
   private DataXceiver(Peer peer, DataNode datanode,
       DataXceiverServer dataXceiverServer) throws IOException {
-    super(datanode.tracer);
+    super(datanode.getTracer());
     this.peer = peer;
     this.dnConf = datanode.getDnConf();
     this.socketIn = peer.getInputStream();
@@ -50,11 +50,9 @@
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSPacket;
-import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.RemoteBlockReader2;
 import org.apache.hadoop.hdfs.net.Peer;
-import org.apache.hadoop.hdfs.net.TcpPeerServer;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -818,7 +816,7 @@ private BlockReader newBlockReader(final ExtendedBlock block,
           "dummy", block, blockToken, offsetInBlock,
           block.getNumBytes() - offsetInBlock, true,
           "", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
-          null, cachingStrategy, null);
+          null, cachingStrategy, datanode.getTracer());
     } catch (IOException e) {
       return null;
     }
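The trailing argument of RemoteBlockReader2.newBlockReader is the client-side Tracer, so this hunk is the null-tracer fix the commit title names. The same call annotated (argument roles inferred from the client signature of this era, so treat the comments as assumptions):

BlockReader reader = RemoteBlockReader2.newBlockReader(
    "dummy", block, blockToken, offsetInBlock,  // file, block, token, start offset
    block.getNumBytes() - offsetInBlock, true,  // length, verifyChecksum
    "", newConnectedPeer(block, dnAddr, blockToken, dnInfo), dnInfo,
    null /* peerCache */, cachingStrategy,
    datanode.getTracer());                      // previously null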
@@ -38,7 +38,6 @@
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.protocol.LocatedStripedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
@@ -50,6 +49,8 @@
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.hadoop.hdfs.server.protocol.BlockECRecoveryCommand.BlockECRecoveryInfo;
 import org.apache.hadoop.hdfs.util.StripedBlockUtil;
+import org.apache.hadoop.test.GenericTestUtils;
+import org.apache.log4j.Level;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -65,6 +66,10 @@ public class TestRecoverStripedFile {
   private static final int groupSize = dataBlkNum + parityBlkNum;
   private static final int dnNum = groupSize + parityBlkNum;
 
+  static {
+    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
+  }
+
   private MiniDFSCluster cluster;
   private Configuration conf;
   private DistributedFileSystem fs;
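The static initializer added above runs once at class-load time, before any @Before or @Test method, so even MiniDFSCluster start-up is logged at the raised level. The same pattern in isolation (hypothetical test class name):

public class TestStripedReadLogging {
  static {
    // Raise DFSClient verbosity for everything this test class triggers.
    GenericTestUtils.setLogLevel(DFSClient.LOG, Level.ALL);
  }
}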