HDFS-9204. DatanodeDescriptor#PendingReplicationWithoutTargets is wrongly calculated. Contributed by Mingliang Liu.

This commit is contained in:
Jing Zhao 2015-10-08 14:11:02 -07:00
parent 0841940a21
commit 118a35bc2e
4 changed files with 15 additions and 4 deletions

View File

@@ -356,6 +356,9 @@ Trunk (Unreleased)
HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)
HDFS-9204. DatanodeDescriptor#PendingReplicationWithoutTargets is wrongly
calculated. (Mingliang Liu via jing9)
BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
HDFS-7347. Configurable erasure coding policy for individual files and

View File

@@ -256,7 +256,7 @@ public CachedBlocksList getPendingUncached() {
private boolean disallowed = false;
// The number of replication work pending before targets are determined
-private int PendingReplicationWithoutTargets = 0;
+private int pendingReplicationWithoutTargets = 0;
// HB processing can use it to tell if it is the first HB since DN restarted
private boolean heartbeatedSinceRegistration = false;
@@ -594,11 +594,11 @@ Iterator<BlockInfo> getBlockIterator(final String storageID) {
}
void incrementPendingReplicationWithoutTargets() {
-  PendingReplicationWithoutTargets++;
+  pendingReplicationWithoutTargets++;
}
void decrementPendingReplicationWithoutTargets() {
-  PendingReplicationWithoutTargets--;
+  pendingReplicationWithoutTargets--;
}
/**
@@ -651,7 +651,7 @@ void addBlocksToBeInvalidated(List<Block> blocklist) {
* The number of work items that are pending to be replicated
*/
int getNumberOfBlocksToBeReplicated() {
-  return PendingReplicationWithoutTargets + replicateBlocks.size();
+  return pendingReplicationWithoutTargets + replicateBlocks.size();
}
/**

View File

@@ -30,6 +30,9 @@ public ReplicationWork(BlockInfo block, BlockCollection bc,
int priority) {
super(block, bc, srcNodes, containingNodes,
liveReplicaStorages, additionalReplRequired, priority);
assert getSrcNodes().length == 1 :
"There should be exactly 1 source node that have been selected";
getSrcNodes()[0].incrementPendingReplicationWithoutTargets();
BlockManager.LOG.debug("Creating a ReplicationWork to recover " + block);
}

View File

@@ -31,6 +31,7 @@
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
import org.junit.Test;
import org.mockito.internal.util.reflection.Whitebox;
import java.util.Iterator;
@@ -135,6 +136,10 @@ public void testNumberOfBlocksToBeReplicated() throws Exception {
assertEquals(NUM_OF_BLOCKS, bm.getUnderReplicatedNotMissingBlocks());
bm.computeDatanodeWork();
assertTrue("The number of replication work pending before targets are " +
"determined should be non-negative.",
(Integer)Whitebox.getInternalState(secondDn,
"pendingReplicationWithoutTargets") >= 0);
assertTrue("The number of blocks to be replicated should be less than "
+ "or equal to " + bm.replicationStreamsHardLimit,