HDFS-7503. Namenode restart after large deletions can cause slow processReport (Arpit Agarwal)
commit 91042c950f
parent d93bfbace4
@@ -21,6 +21,9 @@ Release 2.6.1 - UNRELEASED
     HDFS-7489. Incorrect locking in FsVolumeList#checkDirs can hang datanodes
     (Noah Lorang via Colin P. McCabe)
 
+    HDFS-7503. Namenode restart after large deletions can cause slow
+    processReport. (Arpit Agarwal)
+
 Release 2.6.0 - 2014-11-18
 
   INCOMPATIBLE CHANGES
@@ -1776,6 +1776,8 @@ public class BlockManager {
     final long startTime = Time.now(); //after acquiring write lock
     final long endTime;
     DatanodeDescriptor node;
+    Collection<Block> invalidatedBlocks = null;
+
     try {
       node = datanodeManager.getDatanode(nodeID);
       if (node == null || !node.isAlive) {
@@ -1804,7 +1806,7 @@ public class BlockManager {
         // ordinary block reports. This shortens restart times.
         processFirstBlockReport(storageInfo, newReport);
       } else {
-        processReport(storageInfo, newReport);
+        invalidatedBlocks = processReport(storageInfo, newReport);
       }
 
       // Now that we have an up-to-date block report, we know that any
@@ -1823,6 +1825,14 @@ public class BlockManager {
       namesystem.writeUnlock();
     }
 
+    if (invalidatedBlocks != null) {
+      for (Block b : invalidatedBlocks) {
+        blockLog.info("BLOCK* processReport: " + b + " on " + node
+            + " size " + b.getNumBytes()
+            + " does not belong to any file");
+      }
+    }
+
     // Log the block report processing stats from Namenode perspective
     final NameNodeMetrics metrics = NameNode.getNameNodeMetrics();
     if (metrics != null) {
@@ -1866,8 +1876,9 @@ public class BlockManager {
     }
   }
 
-  private void processReport(final DatanodeStorageInfo storageInfo,
-      final BlockListAsLongs report) throws IOException {
+  private Collection<Block> processReport(
+      final DatanodeStorageInfo storageInfo,
+      final BlockListAsLongs report) throws IOException {
     // Normal case:
     // Modify the (block-->datanode) map, according to the difference
     // between the old and new block report.
@@ -1898,14 +1909,13 @@ public class BlockManager {
           + " of " + numBlocksLogged + " reported.");
     }
     for (Block b : toInvalidate) {
-      blockLog.info("BLOCK* processReport: "
-          + b + " on " + node + " size " + b.getNumBytes()
-          + " does not belong to any file");
       addToInvalidates(b, node);
     }
     for (BlockToMarkCorrupt b : toCorrupt) {
       markBlockAsCorrupt(b, storageInfo, node);
    }
+
+    return toInvalidate;
   }
 
   /**
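
For context, the change collects the blocks that belong to no file while the namesystem write lock is held and defers the per-block "does not belong to any file" logging until after the lock is released, so a block report arriving after a large deletion no longer spends its time logging under the lock. A minimal, self-contained sketch of that lock-then-log pattern follows; the class, field, and method names are illustrative stand-ins, not the actual BlockManager API.

import java.util.ArrayList;
import java.util.Collection;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class ReportProcessor {
  // Stand-in for the namesystem lock held while the block map is updated.
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  void processReport(Iterable<String> reportedBlocks) {
    Collection<String> toInvalidate;
    lock.writeLock().lock();
    try {
      // Only the map updates happen under the lock; orphaned blocks are
      // collected instead of being logged one by one while the lock is held.
      toInvalidate = diffReport(reportedBlocks);
    } finally {
      lock.writeLock().unlock();
    }
    // The expensive logging runs after the lock has been released.
    for (String b : toInvalidate) {
      System.out.println("BLOCK* processReport: " + b
          + " does not belong to any file");
    }
  }

  // Hypothetical helper: decide which reported blocks belong to no file.
  private Collection<String> diffReport(Iterable<String> reportedBlocks) {
    Collection<String> orphaned = new ArrayList<>();
    for (String b : reportedBlocks) {
      orphaned.add(b); // placeholder: a real diff would consult the block map
    }
    return orphaned;
  }
}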