diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 1116f800fb2..95351a2298e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -356,6 +356,9 @@ Trunk (Unreleased)
 
     HDFS-8800. hdfs --daemon stop namenode corrupts logs (John Smith via aw)
 
+    HDFS-9204. DatanodeDescriptor#PendingReplicationWithoutTargets is wrongly
+    calculated. (Mingliang Liu via jing9)
+
   BREAKDOWN OF HDFS-7285 SUBTASKS AND RELATED JIRAS
 
     HDFS-7347. Configurable erasure coding policy for individual files and
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
index b258f0600e2..fde645ebb2b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
@@ -256,7 +256,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   private boolean disallowed = false;
 
   // The number of replication work pending before targets are determined
-  private int PendingReplicationWithoutTargets = 0;
+  private int pendingReplicationWithoutTargets = 0;
 
   // HB processing can use it to tell if it is the first HB since DN restarted
   private boolean heartbeatedSinceRegistration = false;
@@ -594,11 +594,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
 
   void incrementPendingReplicationWithoutTargets() {
-    PendingReplicationWithoutTargets++;
+    pendingReplicationWithoutTargets++;
   }
 
   void decrementPendingReplicationWithoutTargets() {
-    PendingReplicationWithoutTargets--;
+    pendingReplicationWithoutTargets--;
   }
 
   /**
@@ -651,7 +651,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * The number of work items that are pending to be replicated
    */
   int getNumberOfBlocksToBeReplicated() {
-    return PendingReplicationWithoutTargets + replicateBlocks.size();
+    return pendingReplicationWithoutTargets + replicateBlocks.size();
   }
 
   /**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
index 8266f453737..8a3900c22a5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
@@ -30,6 +30,9 @@ class ReplicationWork extends BlockRecoveryWork {
       int priority) {
     super(block, bc, srcNodes, containingNodes, liveReplicaStorages,
         additionalReplRequired, priority);
+    assert getSrcNodes().length == 1 :
+        "There should be exactly 1 source node that has been selected";
+    getSrcNodes()[0].incrementPendingReplicationWithoutTargets();
     BlockManager.LOG.debug("Creating a ReplicationWork to recover " + block);
   }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
index 27b35f0b56a..c0b54b0f624 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
@@ -31,6 +31,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.junit.Test;
+import org.mockito.internal.util.reflection.Whitebox;
 
 import java.util.Iterator;
 
@@ -135,6 +136,10 @@ public class TestUnderReplicatedBlocks {
       assertEquals(NUM_OF_BLOCKS, bm.getUnderReplicatedNotMissingBlocks());
       bm.computeDatanodeWork();
 
+      assertTrue("The number of replication work pending before targets are " +
+              "determined should be non-negative.",
+          (Integer)Whitebox.getInternalState(secondDn,
+              "pendingReplicationWithoutTargets") >= 0);
       assertTrue("The number of blocks to be replicated should be less than "
           + "or equal to " + bm.replicationStreamsHardLimit,
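
Note (illustrative sketch, not part of the patch above): the new test assertion reads the renamed private field through Mockito's Whitebox.getInternalState, which looks the field up by its string name via reflection. A minimal, self-contained sketch of that pattern, using a hypothetical Counter class in place of DatanodeDescriptor, might look like this:

// Illustrative sketch only (not from the patch): demonstrates the
// Whitebox.getInternalState reflection pattern used by the new test
// assertion. The Counter class and PendingCounterSketch are hypothetical
// stand-ins for DatanodeDescriptor#pendingReplicationWithoutTargets.
import org.mockito.internal.util.reflection.Whitebox;

public class PendingCounterSketch {

  static class Counter {
    // Private field, analogous to pendingReplicationWithoutTargets.
    private int pendingReplicationWithoutTargets = 0;

    void increment() { pendingReplicationWithoutTargets++; }
    void decrement() { pendingReplicationWithoutTargets--; }
  }

  public static void main(String[] args) {
    Counter counter = new Counter();
    counter.increment();
    counter.decrement();

    // Whitebox reads the private field by its (string) name via reflection,
    // so the name must match the field exactly after the rename.
    int pending = (Integer) Whitebox.getInternalState(
        counter, "pendingReplicationWithoutTargets");

    if (pending < 0) {
      throw new AssertionError("counter should never go negative: " + pending);
    }
    System.out.println("pendingReplicationWithoutTargets = " + pending);
  }
}

Because the lookup is by string, the test has to pass the new lower-camel-case name; the old name would no longer resolve to the field after the rename.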