HDFS-4052. BlockManager#invalidateWork should print log outside the lock. Contributed by Jing Zhao.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1398631 13f79535-47bb-0310-9956-ffa450edef68
This commit is contained in:
Suresh Srinivas 2012-10-16 03:37:49 +00:00
parent a75673cbd8
commit b7887f31fb
3 changed files with 21 additions and 21 deletions

View File

@ -147,6 +147,9 @@ Trunk (Unreleased)
Block Pool Used, Block Pool Used(%) and Failed Volumes.
(Brahma Reddy Battula via suresh)
HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
(Jing Zhao via suresh)
OPTIMIZATIONS
BUG FIXES

View File

@ -2856,6 +2856,9 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
* @return number of blocks scheduled for removal during this iteration.
*/
private int invalidateWorkForOneNode(String nodeId) { private int invalidateWorkForOneNode(String nodeId) {
final List<Block> toInvalidate;
final DatanodeDescriptor dn;
namesystem.writeLock(); namesystem.writeLock();
try { try {
// blocks should not be replicated or removed if safe mode is on // blocks should not be replicated or removed if safe mode is on
@ -2865,10 +2868,23 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
} }
// get blocks to invalidate for the nodeId // get blocks to invalidate for the nodeId
assert nodeId != null; assert nodeId != null;
return invalidateBlocks.invalidateWork(nodeId); dn = datanodeManager.getDatanode(nodeId);
if (dn == null) {
invalidateBlocks.remove(nodeId);
return 0;
}
toInvalidate = invalidateBlocks.invalidateWork(nodeId, dn);
if (toInvalidate == null) {
return 0;
}
} finally { } finally {
namesystem.writeUnlock(); namesystem.writeUnlock();
} }
if (NameNode.stateChangeLog.isInfoEnabled()) {
NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+ ": ask " + dn + " to delete " + toInvalidate);
}
return toInvalidate.size();
} }
boolean blockHasEnoughRacks(Block b) { boolean blockHasEnoughRacks(Block b) {

View File

@ -134,26 +134,7 @@ class InvalidateBlocks {
return new ArrayList<String>(node2blocks.keySet()); return new ArrayList<String>(node2blocks.keySet());
} }
/** Invalidate work for the storage. */ synchronized List<Block> invalidateWork(
int invalidateWork(final String storageId) {
final DatanodeDescriptor dn = datanodeManager.getDatanode(storageId);
if (dn == null) {
remove(storageId);
return 0;
}
final List<Block> toInvalidate = invalidateWork(storageId, dn);
if (toInvalidate == null) {
return 0;
}
if (NameNode.stateChangeLog.isInfoEnabled()) {
NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+ ": ask " + dn + " to delete " + toInvalidate);
}
return toInvalidate.size();
}
private synchronized List<Block> invalidateWork(
final String storageId, final DatanodeDescriptor dn) { final String storageId, final DatanodeDescriptor dn) {
final LightWeightHashSet<Block> set = node2blocks.get(storageId); final LightWeightHashSet<Block> set = node2blocks.get(storageId);
if (set == null) { if (set == null) {