HDFS-7503. Namenode restart after large deletions can cause slow processReport (Arpit Agarwal)
parent d693a252bd
commit 390642acf3
@@ -593,6 +593,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-7489. Incorrect locking in FsVolumeList#checkDirs can hang datanodes
     (Noah Lorang via Colin P. McCabe)
 
+    HDFS-7503. Namenode restart after large deletions can cause slow
+    processReport. (Arpit Agarwal)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES

@@ -1785,6 +1785,8 @@ public class BlockManager {
     final long startTime = Time.now(); //after acquiring write lock
     final long endTime;
     DatanodeDescriptor node;
+    Collection<Block> invalidatedBlocks = null;
+
     try {
       node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {

@@ -1813,7 +1815,7 @@ public class BlockManager {
         // ordinary block reports. This shortens restart times.
         processFirstBlockReport(storageInfo, newReport);
       } else {
-        processReport(storageInfo, newReport);
+        invalidatedBlocks = processReport(storageInfo, newReport);
       }
 
       // Now that we have an up-to-date block report, we know that any

@@ -1832,6 +1834,14 @@ public class BlockManager {
       namesystem.writeUnlock();
     }
 
+    if (invalidatedBlocks != null) {
+      for (Block b : invalidatedBlocks) {
+        blockLog.info("BLOCK* processReport: " + b + " on " + node
+            + " size " + b.getNumBytes()
+            + " does not belong to any file");
+      }
+    }
+
     // Log the block report processing stats from Namenode perspective
     final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
     if (metrics != null) {

@@ -1875,8 +1885,9 @@ public class BlockManager {
     }
   }
 
-  private void processReport(final DatanodeStorageInfo storageInfo,
-      final BlockListAsLongs report) throws IOException {
+  private Collection<Block> processReport(
+      final DatanodeStorageInfo storageInfo,
+      final BlockListAsLongs report) throws IOException {
     // Normal case:
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.

@@ -1907,14 +1918,13 @@ public class BlockManager {
           + " of " + numBlocksLogged + " reported.");
     }
     for (Block b : toInvalidate) {
-      blockLog.info("BLOCK* processReport: "
-          + b + " on " + node + " size " + b.getNumBytes()
-          + " does not belong to any file");
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
       markBlockAsCorrupt(b, storageInfo, node);
     }
+
+    return toInvalidate;
   }
 
   /**
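
Taken together, the hunks above make the private processReport(...) overload return its toInvalidate collection so the caller can emit the per-block "does not belong to any file" messages after namesystem.writeUnlock(), instead of logging each block while still holding the write lock. The following minimal, self-contained sketch illustrates that pattern only; the class, lock, record type and println below are illustrative stand-ins and are not the actual BlockManager, FSNamesystem lock or blockLog.

// Illustrative sketch only -- the names here are stand-ins, not Hadoop APIs.
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

public class DeferredReportLogging {

  /** Stand-in for a reported block (HDFS uses org.apache.hadoop.hdfs.protocol.Block). */
  record Block(long id, long numBytes) {}

  private final ReentrantReadWriteLock namesystemLock = new ReentrantReadWriteLock();

  public void processReport(List<Block> report) {
    Collection<Block> invalidatedBlocks = null;

    namesystemLock.writeLock().lock();
    try {
      // Under the lock, only update the block map and remember what was dropped.
      invalidatedBlocks = diffAgainstBlocksMap(report);
    } finally {
      namesystemLock.writeLock().unlock();
    }

    // After a large deletion this collection can be huge, so the per-block
    // logging runs outside the critical section and no longer blocks other
    // operations for the duration of the report.
    if (invalidatedBlocks != null) {
      for (Block b : invalidatedBlocks) {
        System.out.println("BLOCK* processReport: " + b + " size "
            + b.numBytes() + " does not belong to any file");
      }
    }
  }

  private Collection<Block> diffAgainstBlocksMap(List<Block> report) {
    // Placeholder for the real diffing logic; here every reported block is stale.
    return new ArrayList<>(report);
  }

  public static void main(String[] args) {
    new DeferredReportLogging().processReport(
        List.of(new Block(1L, 128L), new Block(2L, 256L)));
  }
}

The design point is the same as in the patch: the state mutation stays inside the write-locked region, while the purely informational per-block logging, which dominates the cost after a large deletion, is moved outside it.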