HDFS-15967. Improve the log for Short Circuit Local Reads. Contributed by Bhavik Patel.
(cherry picked from commit 01bad0e92a)
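Every hunk below applies the same SLF4J pattern: string concatenation inside log calls is replaced with {} placeholders, and now-redundant isDebugEnabled() guards are dropped. A minimal sketch of the before/after (the class and method here are hypothetical; only the logging calls mirror the diff):

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical class illustrating the pattern this commit applies.
public class LogPatternSketch {
  private static final Logger LOG =
      LoggerFactory.getLogger(LogPatternSketch.class);

  void report(Object block) {
    // Before: the message (and block.toString()) is built eagerly,
    // even when WARN is disabled.
    LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);

    // After: {} defers formatting until SLF4J has confirmed that
    // WARN is enabled, so a disabled level costs almost nothing.
    LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
  }
}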
@@ -1113,7 +1113,7 @@ public class DataNode extends ReconfigurableBase
       directoryScanner = new DirectoryScanner(data, conf);
       directoryScanner.start();
     } else {
-      LOG.info("Periodic Directory Tree Verification scan " +
+      LOG.warn("Periodic Directory Tree Verification scan " +
           "is disabled because {}",
           reason);
     }
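Note that the format string above is still assembled with +, which costs nothing at runtime: javac folds adjacent string literals into a single constant, so only concatenating data into the message defeats the placeholder optimization. A sketch (reason is the variable from the hunk):

// Literal + literal is folded by javac at compile time into one
// constant format string, so splitting a long message across lines
// keeps the {} optimization intact.
LOG.warn("Periodic Directory Tree Verification scan " +
    "is disabled because {}", reason);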
@@ -1315,21 +1315,6 @@ public class DataNode extends ReconfigurableBase
     }
   }

-  /**
-   * Try to send an error report to the NNs associated with the given
-   * block pool.
-   * @param bpid the block pool ID
-   * @param errCode error code to send
-   * @param errMsg textual message to send
-   */
-  void trySendErrorReport(String bpid, int errCode, String errMsg) {
-    BPOfferService bpos = blockPoolManager.get(bpid);
-    if (bpos == null) {
-      throw new IllegalArgumentException("Bad block pool: " + bpid);
-    }
-    bpos.trySendErrorReport(errCode, errMsg);
-  }
-
   /**
    * Return the BPOfferService instance corresponding to the given block.
    * @return the BPOS
@@ -2017,7 +2002,7 @@ public class DataNode extends ReconfigurableBase
     ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
     DataInputStream in = new DataInputStream(buf);
     id.readFields(in);
-    LOG.debug("Got: {}", id);
+    LOG.debug("BlockTokenIdentifier id: {}", id);
     blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
         null, null);
   }
@@ -2240,8 +2225,8 @@ public class DataNode extends ReconfigurableBase
       return; // do not shutdown
     }

-    LOG.warn("DataNode is shutting down due to failed volumes: ["
-        + failedVolumes + "]");
+    LOG.warn("DataNode is shutting down due to failed volumes: [{}]",
+        failedVolumes);
     shouldRun = false;
   }

@@ -2283,7 +2268,7 @@ public class DataNode extends ReconfigurableBase
         curCount.put("networkErrors", curCount.get("networkErrors") + 1L);
         datanodeNetworkCounts.put(host, curCount);
       } catch (ExecutionException e) {
-        LOG.warn("failed to increment network error counts for " + host);
+        LOG.warn("failed to increment network error counts for host: {}", host);
       }
     }
   }
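One thing to keep straight when converting calls like this: placeholders and arguments should stay in step. SLF4J does not throw on a mismatch; it leaves unmatched {} markers literally in the output and ignores surplus arguments. A sketch, assuming the same LOG as above (host names are illustrative):

// Matched: prints "failed to increment network error counts for host: dn1"
LOG.warn("failed to increment network error counts for host: {}", "dn1");

// Too few arguments: the second {} is printed literally.
LOG.warn("host: {} port: {}", "dn1");

// Too many arguments: the extra one is silently ignored
// (unless it is a Throwable; see the note after the next hunks).
LOG.warn("host: {}", "dn1", 9866);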
@@ -2333,7 +2318,7 @@ public class DataNode extends ReconfigurableBase
       final ExtendedBlock block, final String msg) {
     FsVolumeSpi volume = getFSDataset().getVolume(block);
     if (volume == null) {
-      LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
+      LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
       return;
     }
     bpos.reportBadBlocks(
@@ -2414,7 +2399,7 @@ public class DataNode extends ReconfigurableBase
         transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
             xferTargetStorageTypes[i], xferTargetStorageIDs[i]);
       } catch (IOException ie) {
-        LOG.warn("Failed to transfer block " + blocks[i], ie);
+        LOG.warn("Failed to transfer block {}", blocks[i], ie);
       }
     }
   }
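This hunk leans on SLF4J's trailing-Throwable rule: a final Throwable argument is never consumed by a {} placeholder; it is logged as a stack trace instead. The sketch below is illustrative (the names are made up), not the diff's code:

IOException ie = new IOException("connection reset by peer");
String blockName = "blk_1073741825_1001"; // hypothetical block name

// One placeholder, two arguments: blockName fills the {}, and ie,
// being last and a Throwable, is rendered as a full stack trace
// appended to the message.
LOG.warn("Failed to transfer block {}", blockName, ie);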
@@ -2533,15 +2518,13 @@ public class DataNode extends ReconfigurableBase
     DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
         String[] targetStorageIds, ExtendedBlock b,
         BlockConstructionStage stage, final String clientname) {
-      if (DataTransferProtocol.LOG.isDebugEnabled()) {
-        DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
-            "clientname={}, targets={}, target storage types={}, " +
-            "target storage IDs={}", getClass().getSimpleName(), b,
-            b.getNumBytes(), stage, clientname, Arrays.asList(targets),
-            targetStorageTypes == null ? "[]" :
-                Arrays.asList(targetStorageTypes),
-            targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
-      }
+      DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
+          "clientname={}, targets={}, target storage types={}, " +
+          "target storage IDs={}", getClass().getSimpleName(), b,
+          b.getNumBytes(), stage, clientname, Arrays.asList(targets),
+          targetStorageTypes == null ? "[]" :
+              Arrays.asList(targetStorageTypes),
+          targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
      this.targets = targets;
      this.targetStorageTypes = targetStorageTypes;
      this.targetStorageIds = targetStorageIds;
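Dropping the isDebugEnabled() guard here is safe for message formatting, but the argument expressions still run on every call: Arrays.asList(targets) and both ternaries are evaluated even when DEBUG is off. They are cheap, which is why the guard could go; a guard still pays off when building an argument is expensive. A sketch, with a hypothetical helper:

// Cheap arguments: the guard adds nothing, since {} already defers
// the formatting itself.
LOG.debug("targets={}", Arrays.asList(targets));

// Expensive arguments: they are evaluated before SLF4J checks the
// level, so a guard is still worthwhile here.
if (LOG.isDebugEnabled()) {
  LOG.debug("state={}", dumpFullReplicaMap()); // hypothetical, expensive
}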
@@ -2645,7 +2628,7 @@ public class DataNode extends ReconfigurableBase
         LOG.warn("{}:Failed to transfer {} to {} got",
             bpReg, b, targets[0], ie);
       } catch (Throwable t) {
-        LOG.error("Failed to transfer block " + b, t);
+        LOG.error("Failed to transfer block {}", b, t);
       } finally {
         decrementXmitsInProgress();
         IOUtils.closeStream(blockSender);
@@ -3037,7 +3020,7 @@ public class DataNode extends ReconfigurableBase
     }
     for (TokenIdentifier tokenId : tokenIds) {
       BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
-      LOG.debug("Got: {}", id);
+      LOG.debug("BlockTokenIdentifier: {}", id);
       blockPoolTokenSecretManager.checkAccess(id, null, block,
           BlockTokenIdentifier.AccessMode.READ, null, null);
     }
@@ -3077,8 +3060,10 @@ public class DataNode extends ReconfigurableBase
     b.setGenerationStamp(storedGS);
     if (data.isValidRbw(b)) {
       stage = BlockConstructionStage.TRANSFER_RBW;
+      LOG.debug("Replica is being written!");
     } else if (data.isValidBlock(b)) {
       stage = BlockConstructionStage.TRANSFER_FINALIZED;
+      LOG.debug("Replica is finalized!");
     } else {
       final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
       throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);

@@ -2068,9 +2068,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
       datanode.checkDiskErrorAsync(r.getVolume());
     }

-    if (LOG.isDebugEnabled()) {
-      LOG.debug("blockId=" + blockId + ", replica=" + r);
-    }
+    LOG.debug("blockId={}, replica={}", blockId, r);
     return null;
   }

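A small bonus of the rewritten call: SLF4J declares dedicated one- and two-argument overloads of debug/info/warn, so the new line allocates no varargs array. Only three or more arguments fall back to the Object... overload. A sketch, reusing the hunk's variable names:

// Resolves to debug(String, Object, Object): no array allocation.
LOG.debug("blockId={}, replica={}", blockId, r);

// Resolves to debug(String, Object...): an Object[3] is allocated
// on every call, even when DEBUG is disabled.
LOG.debug("blockId={}, replica={}, volume={}", blockId, r, v);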
@@ -2140,15 +2138,12 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
            continue;
          }
        } catch(IllegalArgumentException e) {
-          LOG.warn("Parent directory check failed; replica " + info
-              + " is not backed by a local file");
+          LOG.warn("Parent directory check failed; replica {} is " +
+              "not backed by a local file", info);
        }
        removing = volumeMap.remove(bpid, invalidBlks[i]);
        addDeletingBlock(bpid, removing.getBlockId());
-        if (LOG.isDebugEnabled()) {
-          LOG.debug("Block file " + removing.getBlockURI()
-              + " is to be deleted");
-        }
+        LOG.debug("Block file {} is to be deleted", removing.getBlockURI());
        if (removing instanceof ReplicaInPipeline) {
          ((ReplicaInPipeline) removing).releaseAllBytesReserved();
        }
@@ -2189,8 +2184,8 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
            dataStorage.getTrashDirectoryForReplica(bpid, removing));
        }
      } catch (ClosedChannelException e) {
-        LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
-            "block " + invalidBlks[i]);
+        LOG.warn("Volume {} is closed, ignore the deletion task for " +
+            "block: {}", v, invalidBlks[i]);
      }
    }
    if (!errors.isEmpty()) {