From a79f570b6cef75ad9ab2c06924db933282e9a061 Mon Sep 17 00:00:00 2001
From: Jing Zhao
Date: Wed, 3 Feb 2016 13:30:09 -0800
Subject: [PATCH] HDFS-9748. Avoid duplication in pendingReplications when
 addExpectedReplicasToPending is called twice. Contributed by Walter Su.

(cherry picked from commit 7badf156049b78cabf8537fff9846a0f9924a090)
(cherry picked from commit 459c31de4a0db322f7a27a9b80dc3b2eecfa470d)
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt               | 3 +++
 .../server/blockmanagement/PendingReplicationBlocks.java  | 9 ++++++---
 .../server/blockmanagement/TestPendingReplication.java    | 9 ++++++++-
 3 files changed, 17 insertions(+), 4 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index ace1e1c97cc..604e51de4d9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1710,6 +1710,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9739. DatanodeStorage.isValidStorageId() is broken (Mingliang Liu
     via vinayakumarb)
 
+    HDFS-9748. Avoid duplication in pendingReplications when
+    addExpectedReplicasToPending is called twice. (Walter Su via jing9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
index 04232cf95e3..71939de6ba8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
@@ -23,7 +23,6 @@ import java.io.PrintWriter;
 import java.sql.Time;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
@@ -77,7 +76,7 @@ class PendingReplicationBlocks {
    * @param block The corresponding block
    * @param targets The DataNodes where replicas of the block should be placed
    */
-  void increment(BlockInfo block, DatanodeDescriptor[] targets) {
+  void increment(BlockInfo block, DatanodeDescriptor... targets) {
     synchronized (pendingReplications) {
       PendingBlockInfo found = pendingReplications.get(block);
       if (found == null) {
@@ -193,7 +192,11 @@ class PendingReplicationBlocks {
 
     void incrementReplicas(DatanodeDescriptor... newTargets) {
       if (newTargets != null) {
-        Collections.addAll(targets, newTargets);
+        for (DatanodeDescriptor newTarget : newTargets) {
+          if (!targets.contains(newTarget)) {
+            targets.add(newTarget);
+          }
+        }
       }
     }
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
index b5b0cf21df3..18f28d53efc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestPendingReplication.java
@@ -80,13 +80,20 @@ public class TestPendingReplication {
 
     //
-    // remove one item and reinsert it
+    // remove one item
     //
     BlockInfo blk = genBlockInfo(8, 8, 0);
     pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor()); // removes one replica
     assertEquals("pendingReplications.getNumReplicas ",
         7, pendingReplications.getNumReplicas(blk));
 
+    //
+    // insert the same item twice should be counted as once
+    //
+    pendingReplications.increment(blk, storages[0].getDatanodeDescriptor());
+    assertEquals("pendingReplications.getNumReplicas ",
+        7, pendingReplications.getNumReplicas(blk));
+
     for (int i = 0; i < 7; i++) {
       // removes all replicas
       pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());
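
For readers skimming the patch, here is a minimal, self-contained Java sketch of the behavior the
patched incrementReplicas() loop provides: recording the same target twice only counts it once.
The PendingTargets class and the use of String in place of DatanodeDescriptor are illustrative
assumptions made to keep the sketch standalone; they are not HDFS code.

    import java.util.ArrayList;
    import java.util.List;

    // Illustrative stand-in for PendingBlockInfo; "PendingTargets" is not an HDFS class,
    // and String replaces DatanodeDescriptor so the sketch compiles on its own.
    class PendingTargets {
      private final List<String> targets = new ArrayList<>();

      // Same shape as the patched incrementReplicas: skip targets already recorded,
      // so a repeated call for the same DataNode does not inflate the count.
      void incrementReplicas(String... newTargets) {
        if (newTargets != null) {
          for (String newTarget : newTargets) {
            if (!targets.contains(newTarget)) {
              targets.add(newTarget);
            }
          }
        }
      }

      int getNumReplicas() {
        return targets.size();
      }

      public static void main(String[] args) {
        PendingTargets pending = new PendingTargets();
        pending.incrementReplicas("dn1", "dn2");
        pending.incrementReplicas("dn2");             // duplicate target, ignored
        System.out.println(pending.getNumReplicas()); // prints 2, not 3
      }
    }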