HDFS-10694. processReport() should print blockReportId in each log message. Contributed by Yuanbo Liu.

(cherry picked from commit 6dfa4fb16e449a248f2f3cf43a6be59dd8e1ac78)
Yuanbo Liu 2016-08-10 10:48:42 -07:00 committed by Konstantin V Shvachko
parent f5754fe5bc
commit e88546596d
1 changed file with 24 additions and 13 deletions
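For context, here is a minimal, self-contained sketch of the logging pattern this patch applies: the block report ID is rendered as a hex string and prefixed to every processReport() log line so that all messages from one report can be correlated. The class name, the plain SLF4J logger setup, and the sample values are illustrative only; this is not the patched BlockManager code.

// Sketch of the "0x<reportId>" log prefix added by this patch.
// Class name, logger wiring, and sample values are illustrative.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class BlockReportLogSketch {
  private static final Logger blockLog =
      LoggerFactory.getLogger(BlockReportLogSketch.class);

  public static void main(String[] args) {
    // Stand-in for context.getReportId(); in the patch the hex string is
    // empty when no BlockReportContext is available.
    long reportId = 0x1f3a9c2d4e5b6071L;
    String strBlockReportId = Long.toHexString(reportId);

    // Same "0x{}" prefix the patch adds to each processReport() message.
    blockLog.info("BLOCK* processReport 0x{}: from storage {} node {}, "
            + "blocks: {}, hasStaleStorage: {}, processing time: {} msecs",
        strBlockReportId, "DS-f00d-1234", "127.0.0.1:50010", 42, false, 7);
  }
}

Printing the ID in hex keeps the prefix compact, and the empty-string fallback in the patch covers block reports that arrive without a BlockReportContext.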


@@ -1807,6 +1807,8 @@ public class BlockManager {
     final long endTime;
     DatanodeDescriptor node;
     Collection<Block> invalidatedBlocks = null;
+    String strBlockReportId =
+        context != null ? Long.toHexString(context.getReportId()) : "";
 
     try {
       node = datanodeManager.getDatanode(nodeID);
@@ -1825,9 +1827,10 @@
       }
       if (namesystem.isInStartupSafeMode()
           && storageInfo.getBlockReportCount() > 0) {
-        blockLog.info("BLOCK* processReport: "
+        blockLog.info("BLOCK* processReport 0x{}: "
             + "discarded non-initial block report from {}"
-            + " because namenode still in startup phase", nodeID);
+            + " because namenode still in startup phase",
+            strBlockReportId, nodeID);
         return !node.hasStaleStorages();
       }
@@ -1836,7 +1839,7 @@
         // ordinary block reports. This shortens restart times.
         processFirstBlockReport(storageInfo, newReport);
       } else {
-        invalidatedBlocks = processReport(storageInfo, newReport);
+        invalidatedBlocks = processReport(storageInfo, newReport, context);
       }
 
       storageInfo.receivedBlockReport();
@@ -1870,8 +1873,9 @@
     if (invalidatedBlocks != null) {
       for (Block b : invalidatedBlocks) {
-        blockLog.info("BLOCK* processReport: {} on node {} size {} does not " +
-            "belong to any file", b, node, b.getNumBytes());
+        blockLog.info("BLOCK* processReport 0x{}: {} on node {} size {} " +
+            "does not belong to any file", strBlockReportId,
+            b, node, b.getNumBytes());
       }
     }
@@ -1880,10 +1884,11 @@
     if (metrics != null) {
       metrics.addBlockReport((int) (endTime - startTime));
     }
-    blockLog.info("BLOCK* processReport: from storage {} node {}, " +
-        "blocks: {}, hasStaleStorage: {}, processing time: {} msecs", storage
-        .getStorageID(), nodeID, newReport.getNumberOfBlocks(),
-        node.hasStaleStorages(), (endTime - startTime));
+    blockLog.info("BLOCK* processReport 0x{}: from storage {} node {}, " +
+        "blocks: {}, hasStaleStorage: {}, processing time: {} msecs",
+        strBlockReportId, storage.getStorageID(), nodeID,
+        newReport.getNumberOfBlocks(), node.hasStaleStorages(),
+        (endTime - startTime));
     return !node.hasStaleStorages();
   }
@@ -1989,7 +1994,8 @@
   private Collection<Block> processReport(
       final DatanodeStorageInfo storageInfo,
-      final BlockListAsLongs report) throws IOException {
+      final BlockListAsLongs report,
+      BlockReportContext context) throws IOException {
     // Normal case:
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.
@@ -2002,6 +2008,11 @@
     reportDiff(storageInfo, report,
         toAdd, toRemove, toInvalidate, toCorrupt, toUC);
 
+    String strBlockReportId = "";
+    if (context != null) {
+      strBlockReportId = Long.toHexString(context.getReportId());
+    }
+
     DatanodeDescriptor node = storageInfo.getDatanodeDescriptor();
     // Process the blocks on each queue
     for (StatefulBlockInfo b : toUC) {
@@ -2016,8 +2027,8 @@
       numBlocksLogged++;
     }
     if (numBlocksLogged > maxNumBlocksToLog) {
-      blockLog.info("BLOCK* processReport: logged info for {} of {} " +
-          "reported.", maxNumBlocksToLog, numBlocksLogged);
+      blockLog.info("BLOCK* processReport 0x{}: logged info for {} of {} " +
+          "reported.", strBlockReportId, maxNumBlocksToLog, numBlocksLogged);
     }
     for (Block b : toInvalidate) {
       addToInvalidates(b, node);