HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn)
git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1406278 13f79535-47bb-0310-9956-ffa450edef68
commit 54b70db347
parent b68bd472dc
@@ -1940,6 +1940,8 @@ Release 0.23.5 - UNRELEASED
 
   OPTIMIZATIONS
 
+    HDFS-4075. Reduce recommissioning overhead (Kihwal Lee via daryn)
+
   BUG FIXES
 
     HDFS-3829. TestHftpURLTimeouts fails intermittently with JDK7 (Trevor
@@ -2696,6 +2696,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
   void processOverReplicatedBlocksOnReCommission(
       final DatanodeDescriptor srcNode) {
     final Iterator<? extends Block> it = srcNode.getBlockIterator();
+    int numOverReplicated = 0;
     while(it.hasNext()) {
       final Block block = it.next();
       BlockCollection bc = blocksMap.getBlockCollection(block);
@@ -2705,8 +2706,11 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
       if (numCurrentReplica > expectedReplication) {
         // over-replicated block
         processOverReplicatedBlock(block, expectedReplication, null, null);
+        numOverReplicated++;
       }
     }
+    LOG.info("Invalidated " + numOverReplicated + " over-replicated blocks on " +
+        srcNode + " during recommissioning");
   }
 
   /**
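The two BlockManager hunks above add bookkeeping to the recommission sweep: a counter is incremented for each over-replicated block found during the scan, and the total is reported once in a summary log line when the scan finishes. Below is a minimal, self-contained sketch of that shape; BlockInfo, the replica counts, and the node name are illustrative stand-ins, not real HDFS types.

import java.util.List;

// Illustrative sketch only: BlockInfo and its replica counts are hypothetical
// stand-ins for BlockManager state, not HDFS classes.
public class RecommissionSweepSketch {

  record BlockInfo(long id, int currentReplicas, int expectedReplicas) {}

  // Mirrors the shape of processOverReplicatedBlocksOnReCommission: scan every
  // block on the recommissioned node, handle the over-replicated ones, and
  // emit a single summary line at the end of the sweep.
  static int sweep(String nodeName, List<BlockInfo> blocksOnNode) {
    int numOverReplicated = 0;
    for (BlockInfo b : blocksOnNode) {
      if (b.currentReplicas() > b.expectedReplicas()) {
        // In BlockManager this is processOverReplicatedBlock(...);
        // the sketch only counts.
        numOverReplicated++;
      }
    }
    System.out.println("Invalidated " + numOverReplicated
        + " over-replicated blocks on " + nodeName + " during recommissioning");
    return numOverReplicated;
  }

  public static void main(String[] args) {
    List<BlockInfo> blocks = List.of(
        new BlockInfo(1, 4, 3),   // one replica above target
        new BlockInfo(2, 3, 3),   // exactly at target
        new BlockInfo(3, 5, 3));  // two replicas above target
    sweep("dn-example:50010", blocks);  // prints a count of 2
  }
}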
@@ -608,9 +608,13 @@ public class DatanodeManager {
     if (node.isDecommissionInProgress() || node.isDecommissioned()) {
       LOG.info("Stop Decommissioning " + node);
       heartbeatManager.stopDecommission(node);
-      blockManager.processOverReplicatedBlocksOnReCommission(node);
+      // Over-replicated blocks will be detected and processed when
+      // the dead node comes back and sends in its full block report.
+      if (node.isAlive) {
+        blockManager.processOverReplicatedBlocksOnReCommission(node);
+      }
     }
   }
 
   /**
    * Generate new storage ID.
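The DatanodeManager hunk carries the actual overhead reduction: when the node being recommissioned is dead, the per-block sweep is skipped entirely, since (as the new comment in the patch notes) its over-replicated blocks will be detected anyway once it reconnects and sends a full block report. A minimal sketch of that guard follows, assuming a hypothetical Node type in place of DatanodeDescriptor and a stub for the BlockManager call.

// Illustrative sketch only: Node stands in for DatanodeDescriptor, and
// sweepOverReplicatedBlocks for blockManager.processOverReplicatedBlocksOnReCommission.
public class StopDecommissionSketch {

  static final class Node {
    final String name;
    final boolean alive;
    boolean decommissionInProgress;

    Node(String name, boolean alive, boolean decommissionInProgress) {
      this.name = name;
      this.alive = alive;
      this.decommissionInProgress = decommissionInProgress;
    }
  }

  static void stopDecommission(Node node) {
    if (node.decommissionInProgress) {
      node.decommissionInProgress = false;
      // A dead node's over-replicated blocks are detected anyway when it
      // reconnects and sends a full block report, so the per-block sweep
      // is only worth running for nodes that are currently alive.
      if (node.alive) {
        sweepOverReplicatedBlocks(node);
      }
    }
  }

  static void sweepOverReplicatedBlocks(Node node) {
    System.out.println("sweeping over-replicated blocks on " + node.name);
  }

  public static void main(String[] args) {
    stopDecommission(new Node("dn-dead", false, true));  // sweep skipped
    stopDecommission(new Node("dn-live", true, true));   // sweep runs
  }
}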