HDFS-4052. Merge r1398631 from trunk to branch-2.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1595063 13f79535-47bb-0310-9956-ffa450edef68
parent 81cf200155
commit 8970cf4d86
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -305,6 +305,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6361. TestIdUserGroup.testUserUpdateSetting failed due to out of range
     nfsnobody Id. (Yongjun Zhang via brandonli)
 
+    HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
+    (Jing Zhao via suresh)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -3203,6 +3203,9 @@ public class BlockManager {
    * @return number of blocks scheduled for removal during this iteration.
    */
   private int invalidateWorkForOneNode(String nodeId) {
+    final List<Block> toInvalidate;
+    final DatanodeDescriptor dn;
+
     namesystem.writeLock();
     try {
       // blocks should not be replicated or removed if safe mode is on
@@ -3212,10 +3215,23 @@ public class BlockManager {
       }
       // get blocks to invalidate for the nodeId
       assert nodeId != null;
-      return invalidateBlocks.invalidateWork(nodeId);
+      dn = datanodeManager.getDatanode(nodeId);
+      if (dn == null) {
+        invalidateBlocks.remove(nodeId);
+        return 0;
+      }
+      toInvalidate = invalidateBlocks.invalidateWork(nodeId, dn);
+      if (toInvalidate == null) {
+        return 0;
+      }
     } finally {
       namesystem.writeUnlock();
     }
+    if (NameNode.stateChangeLog.isInfoEnabled()) {
+      NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+          + ": ask " + dn + " to delete " + toInvalidate);
+    }
+    return toInvalidate.size();
   }
 
   boolean blockHasEnoughRacks(Block b) {
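Note on the BlockManager hunks above: the point of HDFS-4052 is that the "ask ... to delete ..." log line, which stringifies every block in the batch, used to be emitted while the FSNamesystem write lock was held (inside invalidateBlocks.invalidateWork); after the change, the batch is collected under the lock and the logging happens after writeUnlock(). The following is a minimal, self-contained sketch of that pattern, not HDFS code; the class and method names (LogOutsideLockSketch, scheduleOneBatch) and the use of ReentrantReadWriteLock are illustrative assumptions.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

/**
 * Minimal sketch of the pattern adopted by HDFS-4052: gather the batch of
 * work while holding the write lock, then do the expensive logging and any
 * follow-up outside the lock. Names here are illustrative, not HDFS APIs.
 */
public class LogOutsideLockSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
  private final List<String> pending = new ArrayList<>();

  void add(String item) {
    lock.writeLock().lock();
    try {
      pending.add(item);
    } finally {
      lock.writeLock().unlock();
    }
  }

  /** @return number of items scheduled in this iteration. */
  int scheduleOneBatch() {
    final List<String> batch;
    lock.writeLock().lock();
    try {
      if (pending.isEmpty()) {
        return 0;
      }
      // Only cheap bookkeeping happens inside the critical section.
      batch = new ArrayList<>(pending);
      pending.clear();
    } finally {
      lock.writeLock().unlock();
    }
    // The potentially large string concatenation and the log I/O happen
    // here, after the lock is released, so other threads are not blocked.
    System.out.println("ask worker to process " + batch);
    return batch.size();
  }

  public static void main(String[] args) {
    LogOutsideLockSketch q = new LogOutsideLockSketch();
    q.add("blk_1");
    q.add("blk_2");
    System.out.println("scheduled " + q.scheduleOneBatch() + " items");
  }
}

In the real method the lock is FSNamesystem's writeLock()/writeUnlock() pair and the logger is NameNode.stateChangeLog, but the shape is the same: bookkeeping inside the critical section, string building and logging outside it.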
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/InvalidateBlocks.java

@@ -170,36 +170,17 @@ class InvalidateBlocks {
     return pendingPeriodInMs - (Time.monotonicNow() - startupTime);
   }
 
-  /** Invalidate work for the storage. */
-  int invalidateWork(final String storageId) {
+  synchronized List<Block> invalidateWork(
+      final String storageId, final DatanodeDescriptor dn) {
     final long delay = getInvalidationDelay();
     if (delay > 0) {
       if (BlockManager.LOG.isDebugEnabled()) {
         BlockManager.LOG
             .debug("Block deletion is delayed during NameNode startup. "
-                + "The deletion will start after " + delay + " ms.");
+                   + "The deletion will start after " + delay + " ms.");
       }
-      return 0;
+      return null;
     }
-    final DatanodeDescriptor dn = datanodeManager.getDatanode(storageId);
-    if (dn == null) {
-      remove(storageId);
-      return 0;
-    }
-    final List<Block> toInvalidate = invalidateWork(storageId, dn);
-    if (toInvalidate == null) {
-      return 0;
-    }
-
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
-      NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
-          + ": ask " + dn + " to delete " + toInvalidate);
-    }
-    return toInvalidate.size();
-  }
-
-  private synchronized List<Block> invalidateWork(
-      final String storageId, final DatanodeDescriptor dn) {
     final LightWeightHashSet<Block> set = node2blocks.get(storageId);
     if (set == null) {
       return null;
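Note on the InvalidateBlocks hunk above: the change removes the int-returning wrapper and promotes the private synchronized worker to the package-private entry point. The method now only returns the batch of blocks for one storage, or null when there is nothing to do, and the caller (BlockManager) performs the logging and the size() accounting outside the lock. Below is a generic sketch of that producer/caller split, assuming a simple Map-backed queue; BatchQueue, takeBatch, and the batch limit are hypothetical names, not HDFS APIs.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/**
 * Sketch of the split applied to InvalidateBlocks: the synchronized producer
 * only hands back the batch for one node (or null if nothing is queued); the
 * caller decides how to log it and what count to report.
 */
class BatchQueue {
  private final Map<String, List<String>> node2items = new HashMap<>();
  private final int batchLimit;

  BatchQueue(int batchLimit) {
    this.batchLimit = batchLimit;
  }

  synchronized void add(String nodeId, String item) {
    node2items.computeIfAbsent(nodeId, k -> new ArrayList<>()).add(item);
  }

  /** @return up to batchLimit items for the node, or null if none are queued. */
  synchronized List<String> takeBatch(String nodeId) {
    final List<String> queued = node2items.get(nodeId);
    if (queued == null) {
      return null;
    }
    final int n = Math.min(batchLimit, queued.size());
    final List<String> batch = new ArrayList<>(queued.subList(0, n));
    queued.subList(0, n).clear();
    if (queued.isEmpty()) {
      node2items.remove(nodeId);
    }
    return batch;
  }
}

class Caller {
  public static void main(String[] args) {
    BatchQueue q = new BatchQueue(2);
    q.add("dn1", "blk_1");
    q.add("dn1", "blk_2");
    q.add("dn1", "blk_3");

    // Caller-side pattern, mirroring BlockManager#invalidateWorkForOneNode:
    // null means "nothing scheduled"; otherwise log outside any shared lock
    // and report the batch size.
    List<String> batch = q.takeBatch("dn1");
    if (batch == null) {
      System.out.println("scheduled 0");
    } else {
      System.out.println("ask dn1 to delete " + batch);
      System.out.println("scheduled " + batch.size());
    }
  }
}

Returning null rather than an empty list keeps the "nothing queued" case allocation-free and lets the caller skip the log line entirely, which mirrors the toInvalidate == null check in invalidateWorkForOneNode.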