HDFS-14938. Add check if excludedNodes contain scope in DFSNetworkTopology#chooseRandomWithStorageType(). Contributed by Lisheng Sun.

This commit is contained in:
Ayush Saxena 2019-11-05 09:23:20 +05:30
parent 7d0adddf09
commit b643a1cbe8
2 changed files with 38 additions and 1 deletions

View File

@ -194,7 +194,13 @@ public class DFSNetworkTopology extends NetworkTopology {
} }
if (!(node instanceof DFSTopologyNodeImpl)) { if (!(node instanceof DFSTopologyNodeImpl)) {
// a node is either DFSTopologyNodeImpl, or a DatanodeDescriptor // a node is either DFSTopologyNodeImpl, or a DatanodeDescriptor
return ((DatanodeDescriptor)node).hasStorageType(type) ? node : null; // if a node is DatanodeDescriptor and excludedNodes contains it,
// return null;
if (excludedNodes != null && excludedNodes.contains(node)) {
LOG.debug("{} in excludedNodes", node);
return null;
}
return ((DatanodeDescriptor) node).hasStorageType(type) ? node : null;
} }
DFSTopologyNodeImpl root = (DFSTopologyNodeImpl)node; DFSTopologyNodeImpl root = (DFSTopologyNodeImpl)node;
Node excludeRoot = excludedScope == null ? null : getNode(excludedScope); Node excludeRoot = excludedScope == null ? null : getNode(excludedScope);

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.hdfs.net; package org.apache.hadoop.hdfs.net;
import com.google.common.collect.Sets; import com.google.common.collect.Sets;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configuration;
@ -600,4 +601,34 @@ public class TestDFSNetworkTopology {
"/default/rack1", excluded, StorageType.DISK); "/default/rack1", excluded, StorageType.DISK);
assertNull("No node should have been selected.", n); assertNull("No node should have been selected.", n);
} }
/**
 * Verifies that DFSNetworkTopology#chooseRandomWithStorageType() selects
 * no node when the given scope resolves to a DatanodeDescriptor that is
 * itself present in the excluded-node set.
 */
@Test
public void testChooseRandomWithStorageTypeScopeEqualsExcludedNodes() {
  DFSNetworkTopology topology =
      DFSNetworkTopology.getInstance(new Configuration());
  final String[] rackNames = {"/default/rack1", "/default/rack2"};
  final String[] hostNames = {"host1", "host2"};
  final StorageType[] storageTypes = {StorageType.DISK, StorageType.DISK};
  DatanodeStorageInfo[] storageInfos = new DatanodeStorageInfo[2];
  for (int idx = 0; idx < storageInfos.length; idx++) {
    // Synthetic storage id "s<idx>" and ip "<idx>.<idx>.<idx>.<idx>".
    String storageId = "s" + idx;
    String address = idx + "." + idx + "." + idx + "." + idx;
    storageInfos[idx] = DFSTestUtil.createDatanodeStorageInfo(storageId,
        address, rackNames[idx], hostNames[idx], storageTypes[idx], null);
  }
  DatanodeDescriptor[] descriptors =
      DFSTestUtil.toDatanodeDescriptor(storageInfos);
  for (DatanodeDescriptor dn : descriptors) {
    topology.add(dn);
  }
  HashSet<Node> excluded = new HashSet<>();
  excluded.add(descriptors[0]);
  // The scope is exactly the excluded datanode, so nothing is selectable.
  Node chosen = topology.chooseRandomWithStorageType(
      "/default/rack1/0.0.0.0:" + DFSConfigKeys.DFS_DATANODE_DEFAULT_PORT,
      null, excluded, StorageType.DISK);
  assertNull("No node should have been selected.", chosen);
}
} }