From f98c343c7f11c165bcc0f7cdbaa2a3998b12cfd2 Mon Sep 17 00:00:00 2001
From: Tsz-wo Sze
Date: Tue, 17 Sep 2013 02:38:47 +0000
Subject: [PATCH] HDFS-5207. In BlockPlacementPolicy.chooseTarget(..), change
 the writer and the excludedNodes parameter types respectively to Node and
 Set.  Contributed by Junping Du

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1523875 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt   |  4 +
 .../server/blockmanagement/BlockManager.java  | 13 +--
 .../blockmanagement/BlockPlacementPolicy.java | 13 +--
 .../BlockPlacementPolicyDefault.java          | 99 ++++++++++---------
 .../BlockPlacementPolicyWithNodeGroup.java    | 36 +++----
 .../hdfs/server/namenode/FSNamesystem.java    |  5 +-
 .../server/namenode/NameNodeRpcServer.java    | 15 +--
 .../TestReplicationPolicy.java                | 26 ++---
 .../TestReplicationPolicyWithNodeGroup.java   | 11 +--
 .../server/namenode/TestAddBlockRetry.java    |  4 +-
 10 files changed, 121 insertions(+), 105 deletions(-)

diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
index 912055ec191..ed45357a727 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
@@ -279,6 +279,10 @@ Release 2.3.0 - UNRELEASED
     methods; replace HashMap with Map in parameter declarations and cleanup
     some related code.  (szetszwo)
 
+    HDFS-5207. In BlockPlacementPolicy.chooseTarget(..), change the writer
+    and the excludedNodes parameter types respectively to Node and Set.
+    (Junping Du via szetszwo)
+
   OPTIMIZATIONS
 
   BUG FIXES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
index b5babaf67ce..dd13a8b450c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
@@ -26,6 +26,7 @@ import java.util.Collection;
 import java.util.Collections;
 import java.util.EnumSet;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -1256,13 +1257,13 @@ public class BlockManager {
       namesystem.writeUnlock();
     }
 
-    final Map<Node, Node> excludedNodes = new HashMap<Node, Node>();
+    final Set<Node> excludedNodes = new HashSet<Node>();
     for(ReplicationWork rw : work){
       // Exclude all of the containing nodes from being targets.
       // This list includes decommissioning or corrupt nodes.
       excludedNodes.clear();
       for (DatanodeDescriptor dn : rw.containingNodes) {
-        excludedNodes.put(dn, dn);
+        excludedNodes.add(dn);
       }
 
       // choose replication targets: NOT HOLDING THE GLOBAL LOCK
@@ -1375,12 +1376,12 @@
    *
    * @throws IOException
    *           if the number of targets < minimum replication.
-   * @see BlockPlacementPolicy#chooseTarget(String, int, DatanodeDescriptor,
-   *      List, boolean, HashMap, long)
+   * @see BlockPlacementPolicy#chooseTarget(String, int, Node,
+   *      List, boolean, Set, long)
    */
   public DatanodeDescriptor[] chooseTarget(final String src,
       final int numOfReplicas, final DatanodeDescriptor client,
-      final HashMap<Node, Node> excludedNodes,
+      final Set<Node> excludedNodes,
       final long blocksize, List<String> favoredNodes) throws IOException {
     List<DatanodeDescriptor> favoredDatanodeDescriptors =
         getDatanodeDescriptors(favoredNodes);
@@ -3248,7 +3249,7 @@ assert storedBlock.findDatanode(dn) < 0 : "Block " + block
     }
 
     private void chooseTargets(BlockPlacementPolicy blockplacement,
-        Map<Node, Node> excludedNodes) {
+        Set<Node> excludedNodes) {
       targets = blockplacement.chooseTarget(bc.getName(),
           additionalReplRequired, srcNode, liveReplicaNodes, false,
           excludedNodes, block.getNumBytes());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
index 71d89d52b7b..48f77298dbb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
@@ -21,6 +21,7 @@ import java.util.ArrayList;
 import java.util.Collection;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -68,22 +69,22 @@ public abstract class BlockPlacementPolicy {
    */
   public abstract DatanodeDescriptor[] chooseTarget(String srcPath,
                                              int numOfReplicas,
-                                             DatanodeDescriptor writer,
+                                             Node writer,
                                              List<DatanodeDescriptor> chosenNodes,
                                              boolean returnChosenNodes,
-                                             Map<Node, Node> excludedNodes,
+                                             Set<Node> excludedNodes,
                                              long blocksize);
 
   /**
-   * Same as {@link #chooseTarget(String, int, DatanodeDescriptor, List, boolean,
-   * HashMap, long)} with added parameter {@code favoredDatanodes}
+   * Same as {@link #chooseTarget(String, int, Node, List, boolean,
+   * Set, long)} with added parameter {@code favoredDatanodes}
    * @param favoredNodes datanodes that should be favored as targets. This
    *          is only a hint and due to cluster state, namenode may not be
    *          able to place the blocks on these datanodes.
    */
   DatanodeDescriptor[] chooseTarget(String src,
-      int numOfReplicas, DatanodeDescriptor writer,
-      Map<Node, Node> excludedNodes,
+      int numOfReplicas, Node writer,
+      Set<Node> excludedNodes,
       long blocksize, List<DatanodeDescriptor> favoredNodes) {
     // This class does not provide the functionality of placing
     // a block in favored datanodes. The implementations of this class
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
index 8fab427907c..7fb4cf01f7d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
@@ -21,9 +21,8 @@ import static org.apache.hadoop.util.Time.now;
 
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
-import java.util.Map;
 import java.util.Set;
 import java.util.TreeSet;
 
@@ -106,10 +105,10 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   @Override
   public DatanodeDescriptor[] chooseTarget(String srcPath,
                                     int numOfReplicas,
-                                    DatanodeDescriptor writer,
+                                    Node writer,
                                     List<DatanodeDescriptor> chosenNodes,
                                     boolean returnChosenNodes,
-                                    Map<Node, Node> excludedNodes,
+                                    Set<Node> excludedNodes,
                                     long blocksize) {
     return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
         excludedNodes, blocksize);
@@ -118,8 +117,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   @Override
   DatanodeDescriptor[] chooseTarget(String src,
       int numOfReplicas,
-      DatanodeDescriptor writer,
-      Map<Node, Node> excludedNodes,
+      Node writer,
+      Set<Node> excludedNodes,
       long blocksize,
       List<DatanodeDescriptor> favoredNodes) {
     try {
@@ -130,8 +129,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
             excludedNodes, blocksize);
       }
 
-      Map<Node, Node> favoriteAndExcludedNodes = excludedNodes == null ?
-          new HashMap<Node, Node>() : new HashMap<Node, Node>(excludedNodes);
+      Set<Node> favoriteAndExcludedNodes = excludedNodes == null ?
+          new HashSet<Node>() : new HashSet<Node>(excludedNodes);
 
       // Choose favored nodes
       List<DatanodeDescriptor> results = new ArrayList<DatanodeDescriptor>();
@@ -150,10 +149,10 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
               + " with favored node " + favoredNode);
           continue;
         }
-        favoriteAndExcludedNodes.put(target, target);
+        favoriteAndExcludedNodes.add(target);
       }
 
-      if (results.size() < numOfReplicas) { 
+      if (results.size() < numOfReplicas) {
         // Not enough favored nodes, choose other nodes.
         numOfReplicas -= results.size();
         DatanodeDescriptor[] remainingTargets =
@@ -175,17 +174,17 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
   /** This is the implementation. */
   private DatanodeDescriptor[] chooseTarget(int numOfReplicas,
-                                    DatanodeDescriptor writer,
+                                    Node writer,
                                     List<DatanodeDescriptor> chosenNodes,
                                     boolean returnChosenNodes,
-                                    Map<Node, Node> excludedNodes,
+                                    Set<Node> excludedNodes,
                                     long blocksize) {
     if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
       return DatanodeDescriptor.EMPTY_ARRAY;
     }
 
     if (excludedNodes == null) {
-      excludedNodes = new HashMap<Node, Node>();
+      excludedNodes = new HashSet<Node>();
     }
 
     int[] result = getMaxNodesPerRack(chosenNodes, numOfReplicas);
@@ -200,12 +199,12 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     }
 
     if (!clusterMap.contains(writer)) {
-      writer=null;
+      writer = null;
     }
 
     boolean avoidStaleNodes = (stats != null
         && stats.isAvoidingStaleDataNodesForWrite());
-    DatanodeDescriptor localNode = chooseTarget(numOfReplicas, writer,
+    Node localNode = chooseTarget(numOfReplicas, writer,
         excludedNodes, blocksize, maxNodesPerRack, results, avoidStaleNodes);
     if (!returnChosenNodes) {
       results.removeAll(chosenNodes);
@@ -228,10 +227,20 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     return new int[] {numOfReplicas, maxNodesPerRack};
   }
 
-  /* choose numOfReplicas from all data nodes */
-  private DatanodeDescriptor chooseTarget(int numOfReplicas,
-                                          DatanodeDescriptor writer,
-                                          Map<Node, Node> excludedNodes,
+  /**
+   * choose numOfReplicas from all data nodes
+   * @param numOfReplicas additional number of replicas wanted
+   * @param writer the writer's machine, could be a non-DatanodeDescriptor node
+   * @param excludedNodes datanodes that should not be considered as targets
+   * @param blocksize size of the data to be written
+   * @param maxNodesPerRack max nodes allowed per rack
+   * @param results the target nodes already chosen
+   * @param avoidStaleNodes avoid stale nodes in replica choosing
+   * @return local node of writer (not chosen node)
+   */
+  private Node chooseTarget(int numOfReplicas,
+                            Node writer,
+                            Set<Node> excludedNodes,
                             long blocksize,
                             int maxNodesPerRack,
                             List<DatanodeDescriptor> results,
@@ -243,13 +252,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
 
     int numOfResults = results.size();
     boolean newBlock = (numOfResults==0);
-    if (writer == null && !newBlock) {
+    if ((writer == null || !(writer instanceof DatanodeDescriptor)) && !newBlock) {
       writer = results.get(0);
    }
 
     // Keep a copy of original excludedNodes
-    final Map<Node, Node> oldExcludedNodes = avoidStaleNodes ?
-        new HashMap<Node, Node>(excludedNodes) : null;
+    final Set<Node> oldExcludedNodes = avoidStaleNodes ?
+        new HashSet<Node>(excludedNodes) : null;
     try {
       if (numOfResults == 0) {
         writer = chooseLocalNode(writer, excludedNodes, blocksize,
@@ -296,7 +305,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
         // We need to additionally exclude the nodes that were added to the
         // result list in the successful calls to choose*() above.
         for (Node node : results) {
-          oldExcludedNodes.put(node, node);
+          oldExcludedNodes.add(node);
         }
         // Set numOfReplicas, since it can get out of sync with the result list
         // if the NotEnoughReplicasException was thrown in chooseRandom().
@@ -314,8 +323,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * choose a node on the same rack
    * @return the chosen node
    */
-  protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
-                                             Map<Node, Node> excludedNodes,
+  protected DatanodeDescriptor chooseLocalNode(Node localMachine,
+                                             Set<Node> excludedNodes,
                                              long blocksize,
                                              int maxNodesPerRack,
                                              List<DatanodeDescriptor> results,
@@ -325,13 +334,13 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
     if (localMachine == null)
       return chooseRandom(NodeBase.ROOT, excludedNodes, blocksize,
           maxNodesPerRack, results, avoidStaleNodes);
-    if (preferLocalNode) {
+    if (preferLocalNode && localMachine instanceof DatanodeDescriptor) {
+      DatanodeDescriptor localDatanode = (DatanodeDescriptor) localMachine;
       // otherwise try local machine first
-      Node oldNode = excludedNodes.put(localMachine, localMachine);
-      if (oldNode == null) { // was not in the excluded list
-        if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize,
+      if (excludedNodes.add(localMachine)) { // was not in the excluded list
+        if (addIfIsGoodTarget(localDatanode, excludedNodes, blocksize,
             maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
-          return localMachine;
+          return localDatanode;
         }
       }
     }
@@ -347,9 +356,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    * @return number of new excluded nodes
    */
   protected int addToExcludedNodes(DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes) {
-    Node node = excludedNodes.put(localMachine, localMachine);
-    return node == null?1:0;
+      Set<Node> excludedNodes) {
+    return excludedNodes.add(localMachine) ? 1 : 0;
   }
 
   /**
@@ -360,8 +368,8 @@
    * in the cluster.
    * @return the chosen node
    */
-  protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine,
-                                             Map<Node, Node> excludedNodes,
+  protected DatanodeDescriptor chooseLocalRack(Node localMachine,
+                                             Set<Node> excludedNodes,
                                              long blocksize,
                                              int maxNodesPerRack,
                                              List<DatanodeDescriptor> results,
@@ -412,7 +420,7 @@
 
   protected void chooseRemoteRack(int numOfReplicas,
                                 DatanodeDescriptor localMachine,
-                                Map<Node, Node> excludedNodes,
+                                Set<Node> excludedNodes,
                                 long blocksize,
                                 int maxReplicasPerRack,
                                 List<DatanodeDescriptor> results,
@@ -436,7 +444,7 @@
    * @return the chosen node, if there is any.
   */
  protected DatanodeDescriptor chooseRandom(String scope,
-      Map<Node, Node> excludedNodes,
+      Set<Node> excludedNodes,
       long blocksize,
       int maxNodesPerRack,
       List<DatanodeDescriptor> results,
@@ -452,7 +460,7 @@
    */
   protected DatanodeDescriptor chooseRandom(int numOfReplicas,
       String scope,
-      Map<Node, Node> excludedNodes,
+      Set<Node> excludedNodes,
       long blocksize,
       int maxNodesPerRack,
       List<DatanodeDescriptor> results,
@@ -460,7 +468,7 @@
       throws NotEnoughReplicasException {
 
     int numOfAvailableNodes = clusterMap.countNumOfAvailableNodes(
-        scope, excludedNodes.keySet());
+        scope, excludedNodes);
     StringBuilder builder = null;
     if (LOG.isDebugEnabled()) {
       builder = debugLoggingBuilder.get();
@@ -472,8 +480,7 @@
     while(numOfReplicas > 0 && numOfAvailableNodes > 0) {
       DatanodeDescriptor chosenNode =
           (DatanodeDescriptor)clusterMap.chooseRandom(scope);
-      Node oldNode = excludedNodes.put(chosenNode, chosenNode);
-      if (oldNode == null) {
+      if (excludedNodes.add(chosenNode)) { //was not in the excluded list
         numOfAvailableNodes--;
 
         int newExcludedNodes = addIfIsGoodTarget(chosenNode, excludedNodes,
@@ -506,16 +513,16 @@
 
   /**
    * If the given node is a good target, add it to the result list and
-   * update the excluded node map.
+   * update the set of excluded nodes.
    * @return -1 if the given is not a good target;
-   *         otherwise, return the number of excluded nodes added to the map.
+   *         otherwise, return the number of nodes added to excludedNodes set.
    */
   int addIfIsGoodTarget(DatanodeDescriptor node,
-      Map<Node, Node> excludedNodes,
+      Set<Node> excludedNodes,
       long blockSize,
       int maxNodesPerRack,
       boolean considerLoad,
-      List<DatanodeDescriptor> results, 
+      List<DatanodeDescriptor> results,
       boolean avoidStaleNodes) {
     if (isGoodTarget(node, blockSize, maxNodesPerRack, considerLoad,
         results, avoidStaleNodes)) {
@@ -614,7 +621,7 @@
    * starts from the writer and traverses all nodes
    * This is basically a traveling salesman problem.
   */
-  private DatanodeDescriptor[] getPipeline(DatanodeDescriptor writer,
+  private DatanodeDescriptor[] getPipeline(Node writer,
       DatanodeDescriptor[] nodes) {
     if (nodes.length==0) return nodes;
 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
index f44f28da825..1b8f916dd2c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
@@ -22,6 +22,7 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -63,8 +64,8 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
    * @return the chosen node
    */
   @Override
-  protected DatanodeDescriptor chooseLocalNode(DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+  protected DatanodeDescriptor chooseLocalNode(Node localMachine,
+      Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeDescriptor> results, boolean avoidStaleNodes)
       throws NotEnoughReplicasException {
     // if no local machine, randomly choose one node
@@ -72,14 +73,16 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
       return chooseRandom(NodeBase.ROOT, excludedNodes,
           blocksize, maxNodesPerRack, results, avoidStaleNodes);
 
-    // otherwise try local machine first
-    Node oldNode = excludedNodes.put(localMachine, localMachine);
-    if (oldNode == null) { // was not in the excluded list
-      if (addIfIsGoodTarget(localMachine, excludedNodes, blocksize,
-          maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
-        return localMachine;
+    if (localMachine instanceof DatanodeDescriptor) {
+      DatanodeDescriptor localDataNode = (DatanodeDescriptor)localMachine;
+      // otherwise try local machine first
+      if (excludedNodes.add(localMachine)) { // was not in the excluded list
+        if (addIfIsGoodTarget(localDataNode, excludedNodes, blocksize,
+            maxNodesPerRack, false, results, avoidStaleNodes) >= 0) {
+          return localDataNode;
+        }
       }
-    } 
+    }
 
     // try a node on local node group
     DatanodeDescriptor chosenNode = chooseLocalNodeGroup(
@@ -95,8 +98,8 @@
 
 
   @Override
-  protected DatanodeDescriptor chooseLocalRack(DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+  protected DatanodeDescriptor chooseLocalRack(Node localMachine,
+      Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeDescriptor> results, boolean avoidStaleNodes)
       throws NotEnoughReplicasException {
     // no local machine, so choose a random machine
@@ -142,7 +145,7 @@
 
   @Override
   protected void chooseRemoteRack(int numOfReplicas,
-      DatanodeDescriptor localMachine, Map<Node, Node> excludedNodes,
+      DatanodeDescriptor localMachine, Set<Node> excludedNodes,
       long blocksize, int maxReplicasPerRack, List<DatanodeDescriptor> results,
       boolean avoidStaleNodes) throws NotEnoughReplicasException {
     int oldNumOfReplicas = results.size();
@@ -168,8 +171,8 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
    * @return the chosen node
    */
   private DatanodeDescriptor chooseLocalNodeGroup(
-      NetworkTopologyWithNodeGroup clusterMap, DatanodeDescriptor localMachine,
-      Map<Node, Node> excludedNodes, long blocksize, int maxNodesPerRack,
+      NetworkTopologyWithNodeGroup clusterMap, Node localMachine,
+      Set<Node> excludedNodes, long blocksize, int maxNodesPerRack,
       List<DatanodeDescriptor> results, boolean avoidStaleNodes)
       throws NotEnoughReplicasException {
     // no local machine, so choose a random machine
@@ -225,13 +228,12 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
    */
   @Override
   protected int addToExcludedNodes(DatanodeDescriptor chosenNode,
-      Map<Node, Node> excludedNodes) {
+      Set<Node> excludedNodes) {
     int countOfExcludedNodes = 0;
     String nodeGroupScope = chosenNode.getNetworkLocation();
     List<Node> leafNodes = clusterMap.getLeaves(nodeGroupScope);
     for (Node leafNode : leafNodes) {
-      Node node = excludedNodes.put(leafNode, leafNode);
-      if (node == null) {
+      if (excludedNodes.add(leafNode)) {
         // not a existing node in excludedNodes
         countOfExcludedNodes++;
       }
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index f4f8ccedec0..30f6d18de84 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -223,7 +223,6 @@ import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 import org.apache.hadoop.util.Daemon;
 import org.apache.hadoop.util.DataChecksum;
@@ -2443,7 +2442,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * client to "try again later".
   */
   LocatedBlock getAdditionalBlock(String src, long fileId, String clientName,
-      ExtendedBlock previous, HashMap<Node, Node> excludedNodes,
+      ExtendedBlock previous, Set<Node> excludedNodes,
       List<String> favoredNodes)
       throws LeaseExpiredException, NotReplicatedYetException,
       QuotaExceededException, SafeModeException, UnresolvedLinkException,
@@ -2642,7 +2641,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   /** @see NameNode#getAdditionalDatanode(String, ExtendedBlock, DatanodeInfo[], DatanodeInfo[], int, String) */
   LocatedBlock getAdditionalDatanode(String src, final ExtendedBlock blk,
-      final DatanodeInfo[] existings, final HashMap<Node, Node> excludes,
+      final DatanodeInfo[] existings, final Set<Node> excludes,
       final int numAdditionalNodes, final String clientName
       ) throws IOException {
     //check if the feature is enabled
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 2d729e69e1f..69805785c01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -29,8 +29,9 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.Arrays;
 import java.util.Collection;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;
 
 import org.apache.commons.logging.Log;
 import org.apache.hadoop.HadoopIllegalArgumentException;
@@ -547,11 +548,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
       stateChangeLog.debug("*BLOCK* NameNode.addBlock: file " + src
           + " fileId=" + fileId + " for " + clientName);
     }
-    HashMap<Node, Node> excludedNodesSet = null;
+    Set<Node> excludedNodesSet = null;
     if (excludedNodes != null) {
-      excludedNodesSet = new HashMap<Node, Node>(excludedNodes.length);
+      excludedNodesSet = new HashSet<Node>(excludedNodes.length);
       for (Node node : excludedNodes) {
-        excludedNodesSet.put(node, node);
+        excludedNodesSet.add(node);
       }
     }
     List<String> favoredNodesList = (favoredNodes == null) ? null
@@ -579,11 +580,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
     metrics.incrGetAdditionalDatanodeOps();
 
-    HashMap<Node, Node> excludeSet = null;
+    Set<Node> excludeSet = null;
     if (excludes != null) {
-      excludeSet = new HashMap<Node, Node>(excludes.length);
+      excludeSet = new HashSet<Node>(excludes.length);
       for (Node node : excludes) {
-        excludeSet.put(node, node);
+        excludeSet.add(node);
       }
     }
     return namesystem.getAdditionalDatanode(src, blk,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
index ededf7e1dc9..dfd71491dd2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
@@ -29,9 +29,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Random;
+import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
@@ -187,7 +189,7 @@ public class TestReplicationPolicy {
   }
 
   private static DatanodeDescriptor[] chooseTarget(int numOfReplicas,
-      List<DatanodeDescriptor> chosenNodes, Map<Node, Node> excludedNodes) {
+      List<DatanodeDescriptor> chosenNodes, Set<Node> excludedNodes) {
     return chooseTarget(numOfReplicas, dataNodes[0], chosenNodes, excludedNodes);
   }
 
@@ -195,7 +197,7 @@ public class TestReplicationPolicy {
       int numOfReplicas,
       DatanodeDescriptor writer,
       List<DatanodeDescriptor> chosenNodes,
-      Map<Node, Node> excludedNodes) {
+      Set<Node> excludedNodes) {
     return replicator.chooseTarget(filename, numOfReplicas, writer,
         chosenNodes, false, excludedNodes, BLOCK_SIZE);
   }
@@ -210,25 +212,25 @@ public class TestReplicationPolicy {
    */
   @Test
   public void testChooseTarget2() throws Exception {
-    HashMap<Node, Node> excludedNodes;
+    Set<Node> excludedNodes;
     DatanodeDescriptor[] targets;
     List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
 
-    excludedNodes = new HashMap<Node, Node>();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    excludedNodes = new HashSet<Node>();
+    excludedNodes.add(dataNodes[1]);
     targets = chooseTarget(0, chosenNodes, excludedNodes);
     assertEquals(targets.length, 0);
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    excludedNodes.add(dataNodes[1]);
     targets = chooseTarget(1, chosenNodes, excludedNodes);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    excludedNodes.add(dataNodes[1]);
     targets = chooseTarget(2, chosenNodes, excludedNodes);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
@@ -236,7 +238,7 @@ public class TestReplicationPolicy {
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    excludedNodes.add(dataNodes[1]);
     targets = chooseTarget(3, chosenNodes, excludedNodes);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
@@ -245,7 +247,7 @@ public class TestReplicationPolicy {
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    excludedNodes.add(dataNodes[1]);
     targets = chooseTarget(4, chosenNodes, excludedNodes);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
@@ -258,7 +260,7 @@ public class TestReplicationPolicy {
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    excludedNodes.add(dataNodes[1]);
     chosenNodes.add(dataNodes[2]);
     targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
         excludedNodes, BLOCK_SIZE);
@@ -479,8 +481,8 @@ public class TestReplicationPolicy {
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
 
-    HashMap<Node, Node> excludedNodes = new HashMap<Node, Node>();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    Set<Node> excludedNodes = new HashSet<Node>();
+    excludedNodes.add(dataNodes[1]);
     List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
     targets = chooseTarget(1, chosenNodes, excludedNodes);
     assertEquals(targets.length, 1);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
index ffea18524ab..5670c2074f9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
@@ -181,7 +181,7 @@ public class TestReplicationPolicyWithNodeGroup {
       int numOfReplicas,
       DatanodeDescriptor writer,
       List<DatanodeDescriptor> chosenNodes,
-      Map<Node, Node> excludedNodes) {
+      Set<Node> excludedNodes) {
     return replicator.chooseTarget(filename, numOfReplicas, writer,
         chosenNodes, false, excludedNodes, BLOCK_SIZE);
   }
@@ -252,14 +252,13 @@ public class TestReplicationPolicyWithNodeGroup {
    * @throws Exception
    */
   @Test
-  public void testChooseTarget2() throws Exception { 
-    HashMap<Node, Node> excludedNodes;
+  public void testChooseTarget2() throws Exception {
     DatanodeDescriptor[] targets;
     BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
     List<DatanodeDescriptor> chosenNodes = new ArrayList<DatanodeDescriptor>();
 
-    excludedNodes = new HashMap<Node, Node>();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    Set<Node> excludedNodes = new HashSet<Node>();
+    excludedNodes.add(dataNodes[1]);
     targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false,
         excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 4);
@@ -275,7 +274,7 @@ public class TestReplicationPolicyWithNodeGroup {
 
     excludedNodes.clear();
     chosenNodes.clear();
-    excludedNodes.put(dataNodes[1], dataNodes[1]);
+    excludedNodes.add(dataNodes[1]);
     chosenNodes.add(dataNodes[2]);
     targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
         excludedNodes, BLOCK_SIZE);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
index 8a3b52330b0..399dfc210d3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
@@ -25,7 +25,7 @@ import static org.mockito.Mockito.spy;
 
 import java.lang.reflect.Field;
 import java.util.EnumSet;
-import java.util.HashMap;
+import java.util.HashSet;
 import java.util.List;
 
 import org.apache.commons.logging.Log;
@@ -119,7 +119,7 @@ public class TestAddBlockRetry {
         return ret;
       }
     }).when(spyBM).chooseTarget(Mockito.anyString(), Mockito.anyInt(),
-        Mockito.<DatanodeDescriptor>any(), Mockito.<HashMap<Node, Node>>any(),
+        Mockito.<DatanodeDescriptor>any(), Mockito.<HashSet<Node>>any(),
         Mockito.anyLong(), Mockito.<List<String>>any());
 
     // create file
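
For reference, a minimal sketch of the idiom this patch replaces, written outside the patch itself. It is illustrative only: the class and method names below are hypothetical and not from the Hadoop source, though Node is the real org.apache.hadoop.net.Node type. It shows why the Map<Node, Node>-as-set pattern collapses cleanly into Set<Node>: put(n, n) == null and add(n) answer the same "was this node newly excluded?" question.

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import org.apache.hadoop.net.Node;

// Hypothetical helper, for illustration only.
class ExcludedNodesIdiom {
  boolean excludeOld(Map<Node, Node> excludedNodes, Node dn) {
    // Before HDFS-5207: a Map keyed and valued by the same node;
    // put() returns null exactly when the node was not yet excluded.
    return excludedNodes.put(dn, dn) == null;
  }

  boolean excludeNew(Set<Node> excludedNodes, Node dn) {
    // After HDFS-5207: Set.add() returns true exactly when the node
    // was not yet excluded, so no dummy value is needed.
    return excludedNodes.add(dn);
  }
}

The Set form also simplifies call sites such as countNumOfAvailableNodes, which previously had to unwrap the map with excludedNodes.keySet() and can now take the collection directly.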