HDFS-4052. Merge r1398631 from trunk to branch-2.
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1595063 13f79535-47bb-0310-9956-ffa450edef68
parent 81cf200155
commit 8970cf4d86
@@ -305,6 +305,9 @@ Release 2.4.1 - UNRELEASED
     HDFS-6361. TestIdUserGroup.testUserUpdateSetting failed due to out of range
     nfsnobody Id. (Yongjun Zhang via brandonli)
 
+    HDFS-4052. BlockManager#invalidateWork should print log outside the lock.
+    (Jing Zhao via suresh)
+
 Release 2.4.0 - 2014-04-07
 
   INCOMPATIBLE CHANGES
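The CHANGES entry above captures the point of this merge: invalidateWorkForOneNode still decides which blocks to delete while holding the namesystem write lock, but the info log (and the size computation) now run only after the lock is released, so logging I/O no longer extends the critical section. A minimal, self-contained sketch of that pattern follows; the lock, logger, and helper used here are illustrative stand-ins rather than the actual Hadoop classes (the real code uses namesystem.writeLock()/writeUnlock() and NameNode.stateChangeLog, as the diffs below show).

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;

class LogOutsideLockSketch {
  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

  int invalidateWorkForOneNode(String nodeId) {
    final List<String> toInvalidate;
    lock.writeLock().lock();
    try {
      // Only the state inspection/mutation happens under the lock.
      toInvalidate = collectBlocksToInvalidate(nodeId);
      if (toInvalidate == null) {
        return 0;
      }
    } finally {
      lock.writeLock().unlock();
    }
    // String concatenation and logging happen after the lock is released,
    // so they no longer block other writers.
    System.out.println("BLOCK* ask " + nodeId + " to delete " + toInvalidate);
    return toInvalidate.size();
  }

  // Placeholder for the real invalidateBlocks.invalidateWork(nodeId, dn) lookup.
  private List<String> collectBlocksToInvalidate(String nodeId) {
    return Arrays.asList("blk_1001", "blk_1002");
  }
}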
@@ -3203,6 +3203,9 @@ public class BlockManager {
    * @return number of blocks scheduled for removal during this iteration.
    */
   private int invalidateWorkForOneNode(String nodeId) {
+    final List<Block> toInvalidate;
+    final DatanodeDescriptor dn;
+
     namesystem.writeLock();
     try {
       // blocks should not be replicated or removed if safe mode is on
@@ -3212,10 +3215,23 @@ public class BlockManager {
       }
       // get blocks to invalidate for the nodeId
       assert nodeId != null;
-      return invalidateBlocks.invalidateWork(nodeId);
+      dn = datanodeManager.getDatanode(nodeId);
+      if (dn == null) {
+        invalidateBlocks.remove(nodeId);
+        return 0;
+      }
+      toInvalidate = invalidateBlocks.invalidateWork(nodeId, dn);
+      if (toInvalidate == null) {
+        return 0;
+      }
     } finally {
       namesystem.writeUnlock();
     }
+    if (NameNode.stateChangeLog.isInfoEnabled()) {
+      NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
+          + ": ask " + dn + " to delete " + toInvalidate);
+    }
+    return toInvalidate.size();
   }
 
   boolean blockHasEnoughRacks(Block b) {
@@ -170,36 +170,17 @@ class InvalidateBlocks {
     return pendingPeriodInMs - (Time.monotonicNow() - startupTime);
   }
 
-  /** Invalidate work for the storage. */
-  int invalidateWork(final String storageId) {
+  synchronized List<Block> invalidateWork(
+      final String storageId, final DatanodeDescriptor dn) {
     final long delay = getInvalidationDelay();
     if (delay > 0) {
       if (BlockManager.LOG.isDebugEnabled()) {
         BlockManager.LOG
             .debug("Block deletion is delayed during NameNode startup. "
                 + "The deletion will start after " + delay + " ms.");
       }
-      return 0;
+      return null;
     }
-    final DatanodeDescriptor dn = datanodeManager.getDatanode(storageId);
-    if (dn == null) {
-      remove(storageId);
-      return 0;
-    }
-    final List<Block> toInvalidate = invalidateWork(storageId, dn);
-    if (toInvalidate == null) {
-      return 0;
-    }
-
-    if (NameNode.stateChangeLog.isInfoEnabled()) {
-      NameNode.stateChangeLog.info("BLOCK* " + getClass().getSimpleName()
-          + ": ask " + dn + " to delete " + toInvalidate);
-    }
-    return toInvalidate.size();
-  }
-
-  private synchronized List<Block> invalidateWork(
-      final String storageId, final DatanodeDescriptor dn) {
     final LightWeightHashSet<Block> set = node2blocks.get(storageId);
     if (set == null) {
       return null;
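Taken together with the BlockManager hunks above, the net effect on InvalidateBlocks is easiest to see as a before/after of the method signatures; this is a restatement of the diff, not additional API:

// Before: looked up the DatanodeDescriptor, logged, and returned a count itself.
int invalidateWork(final String storageId);

// After: the caller (BlockManager) resolves the DatanodeDescriptor and receives
// the block list, so it can log and count after dropping the namesystem write
// lock. Returns null when deletion is delayed during NameNode startup or when
// there is nothing to invalidate for this storage.
synchronized List<Block> invalidateWork(final String storageId,
    final DatanodeDescriptor dn);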