HDFS-9748. Avoid duplication in pendingReplications when addExpectedReplicasToPending is called twice. Contributed by Walter Su.
(cherry picked from commit 7badf15604)
(cherry picked from commit 459c31de4a)
parent b49ac969bc
commit a79f570b6c

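For context, the sketch below is a minimal standalone illustration of the duplication this patch removes; the class, field, and method names are hypothetical stand-ins (plain Strings instead of DatanodeDescriptor), not the actual Hadoop sources. Before the fix, pending targets were appended with Collections.addAll(), so reporting the same DataNode twice inflated the pending-replica count; the fix only adds targets that are not already present.

import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class PendingTargetsSketch {
  // Stand-in for the per-block list of DataNodes a replication is still pending on.
  private final List<String> targets = new ArrayList<>();

  // Pre-patch behaviour: blindly appends, so duplicates accumulate.
  void incrementReplicasOld(String... newTargets) {
    if (newTargets != null) {
      Collections.addAll(targets, newTargets);
    }
  }

  // Post-patch behaviour: only add targets that are not already pending.
  void incrementReplicasNew(String... newTargets) {
    if (newTargets != null) {
      for (String newTarget : newTargets) {
        if (!targets.contains(newTarget)) {
          targets.add(newTarget);
        }
      }
    }
  }

  int getNumReplicas() {
    return targets.size();
  }

  public static void main(String[] args) {
    PendingTargetsSketch before = new PendingTargetsSketch();
    before.incrementReplicasOld("dn1");
    before.incrementReplicasOld("dn1");          // same DataNode reported twice
    System.out.println(before.getNumReplicas()); // prints 2: inflated pending count

    PendingTargetsSketch after = new PendingTargetsSketch();
    after.incrementReplicasNew("dn1");
    after.incrementReplicasNew("dn1");
    System.out.println(after.getNumReplicas());  // prints 1: counted once
  }
}
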
@@ -1710,6 +1710,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9739. DatanodeStorage.isValidStorageId() is broken
     (Mingliang Liu via vinayakumarb)
 
+    HDFS-9748. Avoid duplication in pendingReplications when
+    addExpectedReplicasToPending is called twice. (Walter Su via jing9)
+
 Release 2.7.3 - UNRELEASED
 
   INCOMPATIBLE CHANGES

@@ -23,7 +23,6 @@ import java.io.PrintWriter;
 import java.sql.Time;
 import java.util.ArrayList;
 import java.util.Arrays;
-import java.util.Collections;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;

@@ -77,7 +76,7 @@ class PendingReplicationBlocks {
    * @param block The corresponding block
    * @param targets The DataNodes where replicas of the block should be placed
    */
-  void increment(BlockInfo block, DatanodeDescriptor[] targets) {
+  void increment(BlockInfo block, DatanodeDescriptor... targets) {
     synchronized (pendingReplications) {
       PendingBlockInfo found = pendingReplications.get(block);
       if (found == null) {

@@ -193,7 +192,11 @@ class PendingReplicationBlocks {
 
     void incrementReplicas(DatanodeDescriptor... newTargets) {
       if (newTargets != null) {
-        Collections.addAll(targets, newTargets);
+        for (DatanodeDescriptor newTarget : newTargets) {
+          if (!targets.contains(newTarget)) {
+            targets.add(newTarget);
+          }
+        }
       }
     }
 

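A note on the signature change above, with a hedged illustration (blk, dn, and dnArray are placeholders, not variables from the patch): switching increment(BlockInfo, DatanodeDescriptor[]) to a DatanodeDescriptor... varargs parameter keeps existing array-based callers compiling unchanged while letting callers such as the updated test pass a single target directly.

  // Both call forms bind to the same varargs method after this change:
  pendingReplications.increment(blk, dnArray);  // existing DatanodeDescriptor[] callers
  pendingReplications.increment(blk, dn);       // single DatanodeDescriptor, as in the new test case
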
@@ -80,13 +80,20 @@ public class TestPendingReplication {
 
 
     //
-    // remove one item and reinsert it
+    // remove one item
     //
     BlockInfo blk = genBlockInfo(8, 8, 0);
     pendingReplications.decrement(blk, storages[7].getDatanodeDescriptor()); // removes one replica
     assertEquals("pendingReplications.getNumReplicas ",
                  7, pendingReplications.getNumReplicas(blk));
 
+    //
+    // insert the same item twice should be counted as once
+    //
+    pendingReplications.increment(blk, storages[0].getDatanodeDescriptor());
+    assertEquals("pendingReplications.getNumReplicas ",
+        7, pendingReplications.getNumReplicas(blk));
+
     for (int i = 0; i < 7; i++) {
       // removes all replicas
       pendingReplications.decrement(blk, storages[i].getDatanodeDescriptor());