HDFS-9204. DatanodeDescriptor#PendingReplicationWithoutTargets is wrongly calculated. Contributed by Mingliang Liu.

This commit is contained in:
Jing Zhao 2015-10-08 14:11:02 -07:00
parent 0841940a21
commit 118a35bc2e
4 changed files with 15 additions and 4 deletions

View File

@@ -356,6 +356,9 @@ Trunk (Unreleased)
HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)
HDFS-9204. DatanodeDescriptor#PendingReplicationWithoutTargets is wrongly
calculated. (Mingliang Liu via jing9)
BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
HDFS-7347. Configurable erasure coding policy for individual files and

View File

@@ -256,7 +256,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
private boolean disallowed = false;
// The number of replication work pending before targets are determined
private int PendingReplicationWithoutTargets = 0;
private int pendingReplicationWithoutTargets = 0;
// HB processing can use it to tell if it is the first HB since DN restarted
private boolean heartbeatedSinceRegistration = false;
@@ -594,11 +594,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
}
void incrementPendingReplicationWithoutTargets() {
PendingReplicationWithoutTargets++;
pendingReplicationWithoutTargets++;
}
void decrementPendingReplicationWithoutTargets() {
PendingReplicationWithoutTargets--;
pendingReplicationWithoutTargets--;
}
/**
@@ -651,7 +651,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
* The number of work items that are pending to be replicated
*/
int getNumberOfBlocksToBeReplicated() {
return PendingReplicationWithoutTargets + replicateBlocks.size();
return pendingReplicationWithoutTargets + replicateBlocks.size();
}
/**

View File

@@ -30,6 +30,9 @@ class ReplicationWork extends BlockRecoveryWork {
int priority) {
super(block, bc, srcNodes, containingNodes,
liveReplicaStorages, additionalReplRequired, priority);
assert getSrcNodes().length == 1 :
"There should be exactly 1 source node that have been selected";
getSrcNodes()[0].incrementPendingReplicationWithoutTargets();
BlockManager.LOG.debug("Creating a ReplicationWork to recover " + block);
}

View File

@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.util.Iterator;
@@ -135,6 +136,10 @@ public class TestUnderReplicatedBlocks {
assertEquals(NUM_OF_BLOCKS, bm.getUnderReplicatedNotMissingBlocks());
bm.computeDatanodeWork();
assertTrue("The number of replication work pending before targets are " +
"determined should be non-negative.",
(Integer)Whitebox.getInternalState(secondDn,
"pendingReplicationWithoutTargets") >= 0);
assertTrue("The number of blocks to be replicated should be less than "
+ "or equal to " + bm.replicationStreamsHardLimit,