From 73b86a5046fe3262dde7b05be46b18575e35fd5f Mon Sep 17 00:00:00 2001
From: yliu
Date: Mon, 12 Oct 2015 14:40:14 +0800
Subject: [PATCH] HDFS-8988. Use LightWeightHashSet instead of
 LightWeightLinkedSet in BlockManager#excessReplicateMap. (yliu)

---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt    |  3 +++
 .../server/blockmanagement/BlockManager.java   | 23 ++++++++-----------
 .../hdfs/server/namenode/NamenodeFsck.java     |  4 ++--
 3 files changed, 14 insertions(+), 16 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 417143364e0..938546c0504 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -1512,6 +1512,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-9221. HdfsServerConstants#ReplicaState#getState should avoid calling
     values() since it creates a temporary array. (Staffan Friberg via yliu)
 
+    HDFS-8988. Use LightWeightHashSet instead of LightWeightLinkedSet in
+    BlockManager#excessReplicateMap. (yliu)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index 18bfc41cfbb..8a64b7445e4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -89,7 +89,6 @@ import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
 import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
 import org.apache.hadoop.hdfs.server.protocol.StorageReceivedDeletedBlocks;
 import org.apache.hadoop.hdfs.util.LightWeightHashSet;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 
 import static org.apache.hadoop.hdfs.util.StripedBlockUtil.getInternalBlockLength;
@@ -219,7 +218,7 @@ public class BlockManager implements BlockStatsMXBean {
    * Maps a StorageID to the set of blocks that are "extra" for this
    * DataNode. We'll eventually remove these extras.
    */
-  public final Map<String, LightWeightLinkedSet<BlockInfo>> excessReplicateMap =
+  public final Map<String, LightWeightHashSet<BlockInfo>> excessReplicateMap =
     new HashMap<>();
 
   /**
@@ -1421,11 +1420,6 @@ public class BlockManager implements BlockStatsMXBean {
    */
   @VisibleForTesting
   int computeRecoveryWorkForBlocks(List<List<BlockInfo>> blocksToRecover) {
-    int requiredReplication, numEffectiveReplicas;
-    List<DatanodeDescriptor> containingNodes;
-    BlockCollection bc;
-    int additionalReplRequired;
-
     int scheduledWork = 0;
     List<BlockRecoveryWork> recovWork = new LinkedList<>();
 
@@ -1786,7 +1780,7 @@ public class BlockManager implements BlockStatsMXBean {
     Collection<DatanodeDescriptor> nodesCorrupt = corruptReplicas.getNodes(block);
     for (DatanodeStorageInfo storage : blocksMap.getStorages(block)) {
       final DatanodeDescriptor node = storage.getDatanodeDescriptor();
-      LightWeightLinkedSet<BlockInfo> excessBlocks =
+      LightWeightHashSet<BlockInfo> excessBlocks =
           excessReplicateMap.get(node.getDatanodeUuid());
       int countableReplica = storage.getState() == State.NORMAL ? 1 : 0;
       if ((nodesCorrupt != null) && (nodesCorrupt.contains(node)))
@@ -3090,7 +3084,7 @@ public class BlockManager implements BlockStatsMXBean {
         postponeBlock(block);
         return;
       }
-      LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+      LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
           cur.getDatanodeUuid());
       if (excessBlocks == null || !excessBlocks.contains(block)) {
         if (!cur.isDecommissionInProgress() && !cur.isDecommissioned()) {
@@ -3297,10 +3291,10 @@ public class BlockManager implements BlockStatsMXBean {
   private void addToExcessReplicate(DatanodeInfo dn, BlockInfo storedBlock) {
     assert namesystem.hasWriteLock();
-    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
         dn.getDatanodeUuid());
     if (excessBlocks == null) {
-      excessBlocks = new LightWeightLinkedSet<>();
+      excessBlocks = new LightWeightHashSet<>();
       excessReplicateMap.put(dn.getDatanodeUuid(), excessBlocks);
     }
     if (excessBlocks.add(storedBlock)) {
@@ -3364,7 +3358,7 @@ public class BlockManager implements BlockStatsMXBean {
     // We've removed a block from a node, so it's definitely no longer
     // in "excess" there.
     //
-    LightWeightLinkedSet<BlockInfo> excessBlocks = excessReplicateMap.get(
+    LightWeightHashSet<BlockInfo> excessBlocks = excessReplicateMap.get(
         node.getDatanodeUuid());
     if (excessBlocks != null) {
       if (excessBlocks.remove(storedBlock)) {
@@ -3581,7 +3575,7 @@ public class BlockManager implements BlockStatsMXBean {
       } else if (node.isDecommissioned()) {
         decommissioned++;
       } else {
-        LightWeightLinkedSet<BlockInfo> blocksExcess = excessReplicateMap.get(
+        LightWeightHashSet<BlockInfo> blocksExcess = excessReplicateMap.get(
            node.getDatanodeUuid());
        if (blocksExcess != null && blocksExcess.contains(b)) {
          excess++;
@@ -3988,7 +3982,8 @@ public class BlockManager implements BlockStatsMXBean {
   private void removeFromExcessReplicateMap(Block block) {
     for (DatanodeStorageInfo info : blocksMap.getStorages(block)) {
       String uuid = info.getDatanodeDescriptor().getDatanodeUuid();
-      LightWeightLinkedSet<BlockInfo> excessReplicas = excessReplicateMap.get(uuid);
+      LightWeightHashSet<BlockInfo> excessReplicas =
+          excessReplicateMap.get(uuid);
       if (excessReplicas != null) {
         if (excessReplicas.remove(block)) {
           excessBlocksCount.decrementAndGet();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
index 404a71e2db1..21666396ae5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
@@ -74,7 +74,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
 import org.apache.hadoop.hdfs.server.blockmanagement.NumberReplicas;
 import org.apache.hadoop.hdfs.server.datanode.CachingStrategy;
-import org.apache.hadoop.hdfs.util.LightWeightLinkedSet;
+import org.apache.hadoop.hdfs.util.LightWeightHashSet;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.NodeBase;
@@ -687,7 +687,7 @@ public class NamenodeFsck implements DataEncryptionKeyFactory {
                 .getStorageType()));
           }
           if (showReplicaDetails) {
-            LightWeightLinkedSet<BlockInfo> blocksExcess =
+            LightWeightHashSet<BlockInfo> blocksExcess =
                 bm.excessReplicateMap.get(dnDesc.getDatanodeUuid());
             Collection<DatanodeDescriptor> corruptReplicas =
                 bm.getCorruptReplicas(block.getLocalBlock());