HDFS-15967. Improve the log for Short Circuit Local Reads. Contributed by Bhavik Patel.

This commit is contained in:
Takanobu Asanuma 2021-04-26 13:29:28 +09:00
parent 2621d3f15b
commit 01bad0e92a
2 changed files with 24 additions and 44 deletions

View File

@@ -1129,7 +1129,7 @@ public class DataNode extends ReconfigurableBase
directoryScanner = new DirectoryScanner(data, conf);
directoryScanner.start();
} else {
LOG.info("Periodic Directory Tree Verification scan " +
LOG.warn("Periodic Directory Tree Verification scan " +
"is disabled because {}",
reason);
}
@@ -1331,21 +1331,6 @@ public class DataNode extends ReconfigurableBase
}
}
/**
 * Try to send an error report to the NNs associated with the given
 * block pool. Delivery is best-effort: failures to reach an NN are
 * handled inside BPOfferService rather than propagated to the caller.
 * @param bpid the block pool ID
 * @param errCode error code to send
 * @param errMsg textual message to send
 * @throws IllegalArgumentException if no BPOfferService is registered
 *         for the given block pool ID
 */
void trySendErrorReport(String bpid, int errCode, String errMsg) {
// Look up the per-block-pool actor; null means the bpid is unknown here.
BPOfferService bpos = blockPoolManager.get(bpid);
if (bpos == null) {
throw new IllegalArgumentException("Bad block pool: " + bpid);
}
bpos.trySendErrorReport(errCode, errMsg);
}
/**
* Return the BPOfferService instance corresponding to the given block.
* @return the BPOS
@@ -2033,7 +2018,7 @@ public class DataNode extends ReconfigurableBase
ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
DataInputStream in = new DataInputStream(buf);
id.readFields(in);
LOG.debug("Got: {}", id);
LOG.debug("BlockTokenIdentifier id: {}", id);
blockPoolTokenSecretManager.checkAccess(id, null, block, accessMode,
null, null);
}
@@ -2256,8 +2241,8 @@ public class DataNode extends ReconfigurableBase
return; // do not shutdown
}
LOG.warn("DataNode is shutting down due to failed volumes: ["
+ failedVolumes + "]");
LOG.warn("DataNode is shutting down due to failed volumes: [{}]",
failedVolumes);
shouldRun = false;
}
@@ -2299,7 +2284,7 @@ public class DataNode extends ReconfigurableBase
curCount.put("networkErrors", curCount.get("networkErrors") + 1L);
datanodeNetworkCounts.put(host, curCount);
} catch (ExecutionException e) {
LOG.warn("failed to increment network error counts for " + host);
LOG.warn("failed to increment network error counts for host: {}", host);
}
}
}
@@ -2349,7 +2334,7 @@ public class DataNode extends ReconfigurableBase
final ExtendedBlock block, final String msg) {
FsVolumeSpi volume = getFSDataset().getVolume(block);
if (volume == null) {
LOG.warn("Cannot find FsVolumeSpi to report bad block: " + block);
LOG.warn("Cannot find FsVolumeSpi to report bad block: {}", block);
return;
}
bpos.reportBadBlocks(
@@ -2430,7 +2415,7 @@ public class DataNode extends ReconfigurableBase
transferBlock(new ExtendedBlock(poolId, blocks[i]), xferTargets[i],
xferTargetStorageTypes[i], xferTargetStorageIDs[i]);
} catch (IOException ie) {
LOG.warn("Failed to transfer block " + blocks[i], ie);
LOG.warn("Failed to transfer block {}", blocks[i], ie);
}
}
}
@@ -2549,15 +2534,13 @@ public class DataNode extends ReconfigurableBase
DataTransfer(DatanodeInfo targets[], StorageType[] targetStorageTypes,
String[] targetStorageIds, ExtendedBlock b,
BlockConstructionStage stage, final String clientname) {
if (DataTransferProtocol.LOG.isDebugEnabled()) {
DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
"clientname={}, targets={}, target storage types={}, " +
"target storage IDs={}", getClass().getSimpleName(), b,
b.getNumBytes(), stage, clientname, Arrays.asList(targets),
targetStorageTypes == null ? "[]" :
Arrays.asList(targetStorageTypes),
targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
}
DataTransferProtocol.LOG.debug("{}: {} (numBytes={}), stage={}, " +
"clientname={}, targets={}, target storage types={}, " +
"target storage IDs={}", getClass().getSimpleName(), b,
b.getNumBytes(), stage, clientname, Arrays.asList(targets),
targetStorageTypes == null ? "[]" :
Arrays.asList(targetStorageTypes),
targetStorageIds == null ? "[]" : Arrays.asList(targetStorageIds));
this.targets = targets;
this.targetStorageTypes = targetStorageTypes;
this.targetStorageIds = targetStorageIds;
@@ -2661,7 +2644,7 @@ public class DataNode extends ReconfigurableBase
LOG.warn("{}:Failed to transfer {} to {} got",
bpReg, b, targets[0], ie);
} catch (Throwable t) {
LOG.error("Failed to transfer block " + b, t);
LOG.error("Failed to transfer block {}", b, t);
} finally {
decrementXmitsInProgress();
IOUtils.closeStream(blockSender);
@@ -3103,7 +3086,7 @@ public class DataNode extends ReconfigurableBase
}
for (TokenIdentifier tokenId : tokenIds) {
BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
LOG.debug("Got: {}", id);
LOG.debug("BlockTokenIdentifier: {}", id);
blockPoolTokenSecretManager.checkAccess(id, null, block,
BlockTokenIdentifier.AccessMode.READ, null, null);
}
@@ -3143,8 +3126,10 @@ public class DataNode extends ReconfigurableBase
b.setGenerationStamp(storedGS);
if (data.isValidRbw(b)) {
stage = BlockConstructionStage.TRANSFER_RBW;
LOG.debug("Replica is being written!");
} else if (data.isValidBlock(b)) {
stage = BlockConstructionStage.TRANSFER_FINALIZED;
LOG.debug("Replica is finalized!");
} else {
final String r = data.getReplicaString(b.getBlockPoolId(), b.getBlockId());
throw new IOException(b + " is neither a RBW nor a Finalized, r=" + r);

View File

@@ -2262,9 +2262,7 @@ class FsDatasetImpl implements FsDatasetSpi&lt;FsVolumeImpl&gt; {
datanode.checkDiskErrorAsync(r.getVolume());
}
if (LOG.isDebugEnabled()) {
LOG.debug("blockId=" + blockId + ", replica=" + r);
}
LOG.debug("blockId={}, replica={}", blockId, r);
return null;
}
@@ -2334,15 +2332,12 @@ class FsDatasetImpl implements FsDatasetSpi&lt;FsVolumeImpl&gt; {
continue;
}
} catch(IllegalArgumentException e) {
LOG.warn("Parent directory check failed; replica " + info
+ " is not backed by a local file");
LOG.warn("Parent directory check failed; replica {} is " +
"not backed by a local file", info);
}
removing = volumeMap.remove(bpid, invalidBlks[i]);
addDeletingBlock(bpid, removing.getBlockId());
if (LOG.isDebugEnabled()) {
LOG.debug("Block file " + removing.getBlockURI()
+ " is to be deleted");
}
LOG.debug("Block file {} is to be deleted", removing.getBlockURI());
if (removing instanceof ReplicaInPipeline) {
((ReplicaInPipeline) removing).releaseAllBytesReserved();
}
@@ -2383,8 +2378,8 @@ class FsDatasetImpl implements FsDatasetSpi&lt;FsVolumeImpl&gt; {
dataStorage.getTrashDirectoryForReplica(bpid, removing));
}
} catch (ClosedChannelException e) {
LOG.warn("Volume " + v + " is closed, ignore the deletion task for " +
"block " + invalidBlks[i]);
LOG.warn("Volume {} is closed, ignore the deletion task for " +
"block: {}", v, invalidBlks[i]);
}
}
if (!errors.isEmpty()) {