diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
index e487f698d09..413bbdf7db1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java
@@ -18,13 +18,11 @@
 
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
-import com.google.common.base.Preconditions;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
@@ -52,8 +50,7 @@ public class ClusterNodeTracker<N extends SchedulerNode> {
   private Lock writeLock = readWriteLock.writeLock();
 
   private HashMap<NodeId, N> nodes = new HashMap<>();
-  private Map<String, N> nodeNameToNodeMap = new HashMap<>();
-  private Map<String, List<N>> nodesPerRack = new HashMap<>();
+  private Map<String, Integer> nodesPerRack = new HashMap<>();
 
   private Resource clusterCapacity = Resources.clone(Resources.none());
   private Resource staleClusterCapacity = null;
@@ -69,16 +66,14 @@ public void addNode(N node) {
     writeLock.lock();
     try {
       nodes.put(node.getNodeID(), node);
-      nodeNameToNodeMap.put(node.getNodeName(), node);
 
       // Update nodes per rack as well
       String rackName = node.getRackName();
-      List<N> nodesList = nodesPerRack.get(rackName);
-      if (nodesList == null) {
-        nodesList = new ArrayList<>();
-        nodesPerRack.put(rackName, nodesList);
+      Integer numNodes = nodesPerRack.get(rackName);
+      if (numNodes == null) {
+        numNodes = 0;
       }
-      nodesList.add(node);
+      nodesPerRack.put(rackName, ++numNodes);
 
       // Update cluster capacity
       Resources.addTo(clusterCapacity, node.getTotalResource());
@@ -131,8 +126,8 @@ public int nodeCount(String rackName) {
     readLock.lock();
     String rName = rackName == null ? "NULL" : rackName;
     try {
-      List<N> nodesList = nodesPerRack.get(rName);
-      return nodesList == null ? 0 : nodesList.size();
+      Integer nodeCount = nodesPerRack.get(rName);
+      return nodeCount == null ? 0 : nodeCount;
     } finally {
       readLock.unlock();
     }
@@ -159,18 +154,14 @@ public N removeNode(NodeId nodeId) {
       LOG.warn("Attempting to remove a non-existent node " + nodeId);
       return null;
     }
-    nodeNameToNodeMap.remove(node.getNodeName());
 
     // Update nodes per rack as well
     String rackName = node.getRackName();
-    List<N> nodesList = nodesPerRack.get(rackName);
-    if (nodesList == null) {
-      LOG.error("Attempting to remove node from an empty rack " + rackName);
+    Integer numNodes = nodesPerRack.get(rackName);
+    if (numNodes > 0) {
+      nodesPerRack.put(rackName, --numNodes);
     } else {
-      nodesList.remove(node);
-      if (nodesList.isEmpty()) {
-        nodesPerRack.remove(rackName);
-      }
+      LOG.error("Attempting to remove node from an empty rack " + rackName);
     }
 
     // Update cluster capacity
@@ -306,29 +297,4 @@ public List<N> sortedNodeList(Comparator<N> comparator) {
     Collections.sort(sortedList, comparator);
     return sortedList;
   }
-
-  /**
-   * Convenience method to return list of nodes corresponding to resourceName
-   * passed in the {@link ResourceRequest}.
-   *
-   * @param resourceName Host/rack name of the resource, or
-   *                     {@link ResourceRequest#ANY}
-   * @return list of nodes that match the resourceName
-   */
-  public List<N> getNodesByResourceName(final String resourceName) {
-    Preconditions.checkArgument(
-        resourceName != null && !resourceName.isEmpty());
-    List<N> retNodes = new ArrayList<>();
-    if (ResourceRequest.ANY.equals(resourceName)) {
-      return getAllNodes();
-    } else if (nodeNameToNodeMap.containsKey(resourceName)) {
-      retNodes.add(nodeNameToNodeMap.get(resourceName));
-    } else if (nodesPerRack.containsKey(resourceName)) {
-      return nodesPerRack.get(resourceName);
-    } else {
-      LOG.info(
-          "Could not find a node matching given resourceName " + resourceName);
-    }
-    return retNodes;
-  }
 }
\ No newline at end of file
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestClusterNodeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestClusterNodeTracker.java
deleted file mode 100644
index 7f527f1b432..00000000000
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestClusterNodeTracker.java
+++ /dev/null
@@ -1,69 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
-
-import org.apache.hadoop.yarn.api.records.Resource;
-import org.apache.hadoop.yarn.api.records.ResourceRequest;
-import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
-import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
-import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FSSchedulerNode;
-import org.junit.Before;
-import org.junit.Test;
-
-import java.util.List;
-
-import static org.junit.Assert.assertEquals;
-
-/**
- * Test class to verify ClusterNodeTracker. Using FSSchedulerNode without
- * loss of generality.
- */
-public class TestClusterNodeTracker {
-  private ClusterNodeTracker<FSSchedulerNode> nodeTracker =
-      new ClusterNodeTracker<>();
-
-  @Before
-  public void setup() {
-    List<RMNode> rmNodes =
-        MockNodes.newNodes(2, 4, Resource.newInstance(4096, 4));
-    for (RMNode rmNode : rmNodes) {
-      nodeTracker.addNode(new FSSchedulerNode(rmNode, false));
-    }
-  }
-
-  @Test
-  public void testGetNodeCount() {
-    assertEquals("Incorrect number of nodes in the cluster",
-        8, nodeTracker.nodeCount());
-
-    assertEquals("Incorrect number of nodes in each rack",
-        4, nodeTracker.nodeCount("rack0"));
-  }
-
-  @Test
-  public void testGetNodesForResourceName() throws Exception {
-    assertEquals("Incorrect number of nodes matching ANY",
-        8, nodeTracker.getNodesByResourceName(ResourceRequest.ANY).size());
-
-    assertEquals("Incorrect number of nodes matching rack",
-        4, nodeTracker.getNodesByResourceName("rack0").size());
-
-    assertEquals("Incorrect number of nodes matching node",
-        1, nodeTracker.getNodesByResourceName("host0").size());
-  }
-}
\ No newline at end of file