HDFS-7503. Namenode restart after large deletions can cause slow processReport (Arpit Agarwal)

arp 2014-12-10 23:37:26 -08:00
parent d693a252bd
commit 390642acf3
2 changed files with 19 additions and 6 deletions
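
In short, the fix moves the per-block "does not belong to any file" logging out of the section that holds the namesystem write lock: the private processReport() now returns the blocks it queued for invalidation, and the caller logs them only after writeUnlock(). Below is a minimal, hypothetical sketch of that pattern; the class, lock field, and helper names are illustrative only and are not the actual BlockManager code.

import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.locks.ReentrantReadWriteLock;

// Hypothetical sketch only -- not the actual BlockManager code.
public class DeferredBlockLoggingSketch {

  // Stand-in for the namesystem write lock guarding the blocks map.
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  // Stand-in for a reported block; the real code uses
  // org.apache.hadoop.hdfs.protocol.Block.
  static final class Block {
    final long id;
    final long numBytes;
    Block(long id, long numBytes) { this.id = id; this.numBytes = numBytes; }
    @Override public String toString() { return "blk_" + id + " size " + numBytes; }
  }

  public void processReport(Collection<Block> reported) {
    Collection<Block> invalidatedBlocks;
    lock.writeLock().lock();
    try {
      // Under the lock: only the map updates and the decision of which
      // blocks to invalidate happen here.
      invalidatedBlocks = findBlocksToInvalidate(reported);
    } finally {
      lock.writeLock().unlock();
    }
    // Outside the lock: per-block logging, which can be slow when a large
    // deletion leaves many orphaned blocks, no longer stalls other callers.
    for (Block b : invalidatedBlocks) {
      System.out.println("BLOCK* processReport: " + b
          + " does not belong to any file");
    }
  }

  private Collection<Block> findBlocksToInvalidate(Collection<Block> reported) {
    // Placeholder: the real logic diffs the report against the blocks map.
    return new ArrayList<>(reported);
  }

  public static void main(String[] args) {
    Collection<Block> report = new ArrayList<>();
    report.add(new Block(1001, 134217728L));
    report.add(new Block(1002, 512L));
    new DeferredBlockLoggingSketch().processReport(report);
  }
}

The point of the change, per the commit subject, is that after a large deletion the first block reports can contain many blocks that no longer belong to any file, and emitting an info-level log line for each one while holding the write lock made processReport slow.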

hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -593,6 +593,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-7489. Incorrect locking in FsVolumeList#checkDirs can hang datanodes
     (Noah Lorang via Colin P. McCabe)
 
+    HDFS-7503. Namenode restart after large deletions can cause slow
+    processReport. (Arpit Agarwal)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1785,6 +1785,8 @@ public boolean processReport(final DatanodeID nodeID,
     final long startTime = Time.now(); //after acquiring write lock
     final long endTime;
     DatanodeDescriptor node;
+    Collection<Block> invalidatedBlocks = null;
+
     try {
       node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
@@ -1813,7 +1815,7 @@ public boolean processReport(final DatanodeID nodeID,
         // ordinary block reports. This shortens restart times.
         processFirstBlockReport(storageInfo, newReport);
       } else {
-        processReport(storageInfo, newReport);
+        invalidatedBlocks = processReport(storageInfo, newReport);
       }
 
       // Now that we have an up-to-date block report, we know that any
@@ -1832,6 +1834,14 @@ public boolean processReport(final DatanodeID nodeID,
       namesystem.writeUnlock();
     }
 
+    if (invalidatedBlocks != null) {
+      for (Block b : invalidatedBlocks) {
+        blockLog.info("BLOCK* processReport: " + b + " on " + node
+            + " size " + b.getNumBytes()
+            + " does not belong to any file");
+      }
+    }
+
     // Log the block report processing stats from Namenode perspective
     final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
     if (metrics != null) {
@@ -1875,8 +1885,9 @@ private void rescanPostponedMisreplicatedBlocks() {
     }
   }
 
-  private void processReport(final DatanodeStorageInfo storageInfo,
-      final BlockListAsLongs report) throws IOException {
+  private Collection<Block> processReport(
+      final DatanodeStorageInfo storageInfo,
+      final BlockListAsLongs report) throws IOException {
     // Normal case:
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.
@@ -1907,14 +1918,13 @@ private void processReport(final DatanodeStorageInfo storageInfo,
           + " of " + numBlocksLogged + " reported.");
     }
     for (Block b : toInvalidate) {
-      blockLog.info("BLOCK* processReport: "
-          + b + " on " + node + " size " + b.getNumBytes()
-          + " does not belong to any file");
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
       markBlockAsCorrupt(b, storageInfo, node);
     }
+
+    return toInvalidate;
   }
 
   /**