From b643a1cbe8a82ca331ffcd14fccc1dc0d90da5c7 Mon Sep 17 00:00:00 2001 From: Ayush Saxena Date: Tue, 5 Nov 2019 09:23:20 +0530 Subject: [PATCH] HDFS-14938. Add check if excludedNodes contain scope in DFSNetworkTopology#chooseRandomWithStorageType(). Contributed by Lisheng Sun. --- .../hadoop/hdfs/net/DFSNetworkTopology.java | 8 ++++- .../hdfs/net/TestDFSNetworkTopology.java | 31 +++++++++++++++++++ 2 files changed, 38 insertions(+), 1 deletion(-) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java index bd02768bda3..9082b910eb4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DFSNetworkTopology.java @@ -194,7 +194,13 @@ public class DFSNetworkTopology extends NetworkTopology { } if (!(node instanceof DFSTopologyNodeImpl)) { // a node is either DFSTopologyNodeImpl, or a DatanodeDescriptor - return ((DatanodeDescriptor)node).hasStorageType(type) ? node : null; + // if a node is DatanodeDescriptor and excludedNodes contains it, + // return null; + if (excludedNodes != null && excludedNodes.contains(node)) { + LOG.debug("{} in excludedNodes", node); + return null; + } + return ((DatanodeDescriptor) node).hasStorageType(type) ? node : null; } DFSTopologyNodeImpl root = (DFSTopologyNodeImpl)node; Node excludeRoot = excludedScope == null ? 
null : getNode(excludedScope); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java index 4baa8b7a846..013352394cc 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/net/TestDFSNetworkTopology.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hdfs.net; import com.google.common.collect.Sets; +import org.apache.hadoop.hdfs.DFSConfigKeys; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.apache.hadoop.conf.Configuration; @@ -600,4 +601,34 @@ public class TestDFSNetworkTopology { "/default/rack1", excluded, StorageType.DISK); assertNull("No node should have been selected.", n); } + + /** + * Tests that no node is returned when the node resolved from scope is a + * DatanodeDescriptor and excludedNodes contains it in + * DFSNetworkTopology#chooseRandomWithStorageType(). + */ + @Test + public void testChooseRandomWithStorageTypeScopeEqualsExcludedNodes() { + DFSNetworkTopology dfsCluster = + DFSNetworkTopology.getInstance(new Configuration()); + final String[] racks = {"/default/rack1", "/default/rack2"}; + final String[] hosts = {"host1", "host2"}; + final StorageType[] types = {StorageType.DISK, StorageType.DISK}; + DatanodeStorageInfo[] storages = new DatanodeStorageInfo[2]; + for (int i = 0; i < 2; i++) { + final String storageID = "s" + i; + final String ip = i + "." + i + "." + i + "." 
+ i; + storages[i] = DFSTestUtil.createDatanodeStorageInfo(storageID, ip, + racks[i], hosts[i], types[i], null); + } + DatanodeDescriptor[] dns = DFSTestUtil.toDatanodeDescriptor(storages); + dfsCluster.add(dns[0]); + dfsCluster.add(dns[1]); + HashSet excluded = new HashSet<>(); + excluded.add(dns[0]); + Node n = dfsCluster.chooseRandomWithStorageType( + "/default/rack1/0.0.0.0:" + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT, + null, excluded, StorageType.DISK); + assertNull("No node should have been selected.", n); + } }