HDFS-14913. Correct the value of available count in DFSNetworkTopology#chooseRandomWithStorageType(). Contributed by Ayush Saxena.

Ayush Saxena 2019-10-21 18:35:12 +05:30
parent 9434f36b43
commit 9067a11b0d
2 changed files with 28 additions and 2 deletions

DFSNetworkTopology.java

@@ -212,8 +212,7 @@ public class DFSNetworkTopology extends NetworkTopology {
     }
     if (excludedNodes != null) {
       for (Node excludedNode : excludedNodes) {
-        if (excludeRoot != null
-            && excludedNode.getNetworkLocation().startsWith(excludedScope)) {
+        if (excludeRoot != null && isNodeInScope(excludedNode, excludedScope)) {
           continue;
         }
         if (excludedNode instanceof DatanodeDescriptor) {
@@ -259,6 +258,14 @@ public class DFSNetworkTopology extends NetworkTopology {
     return chosen;
   }
 
+  private boolean isNodeInScope(Node node, String scope) {
+    if (!scope.endsWith("/")) {
+      scope += "/";
+    }
+    String nodeLocation = node.getNetworkLocation() + "/";
+    return nodeLocation.startsWith(scope);
+  }
+
   /**
    * Choose a random node that has the required storage type, under the given
    * root, with an excluded subtree root (could also just be a leaf node).
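
For context, here is a minimal standalone sketch (not part of the patch; the class RackPrefixDemo and its isInScope helper are hypothetical names that mirror the isNodeInScope logic added above) showing why the bare startsWith comparison treated the sibling rack /default/rack10 as falling inside the excluded scope /default/rack1, while the separator-terminated check does not:

// Hypothetical demo class; mirrors the new isNodeInScope helper above.
public class RackPrefixDemo {

  // Same idea as isNodeInScope: compare separator-terminated paths so that
  // "/default/rack10" is not mistaken for a descendant of "/default/rack1".
  static boolean isInScope(String nodeLocation, String scope) {
    if (!scope.endsWith("/")) {
      scope += "/";
    }
    return (nodeLocation + "/").startsWith(scope);
  }

  public static void main(String[] args) {
    String excludedScope = "/default/rack1";

    // Old check: plain string prefix, wrongly matches the sibling rack.
    System.out.println("/default/rack10".startsWith(excludedScope));      // true  (bug)

    // New check: the sibling rack is rejected, real descendants still match.
    System.out.println(isInScope("/default/rack10", excludedScope));      // false
    System.out.println(isInScope("/default/rack1", excludedScope));       // true
    System.out.println(isInScope("/default/rack1/host1", excludedScope)); // true
  }
}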

TestDFSNetworkTopology.java

@@ -581,4 +581,23 @@ public class TestDFSNetworkTopology {
       assertTrue(dd.getHostName().equals("host7"));
     }
   }
+
+  @Test
+  public void testChooseRandomWithStorageTypeNoAvlblNode() {
+    DFSNetworkTopology dfsCluster =
+        DFSNetworkTopology.getInstance(new Configuration());
+    final String[] racks = {"/default/rack1", "/default/rack10"};
+    final String[] hosts = {"host1", "host2"};
+    final StorageType[] types = {StorageType.DISK, StorageType.DISK};
+    final DatanodeStorageInfo[] storages =
+        DFSTestUtil.createDatanodeStorageInfos(2, racks, hosts, types);
+    DatanodeDescriptor[] dns = DFSTestUtil.toDatanodeDescriptor(storages);
+    dfsCluster.add(dns[0]);
+    dfsCluster.add(dns[1]);
+    HashSet<Node> excluded = new HashSet<>();
+    excluded.add(dns[1]);
+    Node n = dfsCluster.chooseRandomWithStorageType("/default",
+        "/default/rack1", excluded, StorageType.DISK);
+    assertNull("No node should have been selected.", n);
+  }
 }
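
To connect the new test to the fix, here is a rough standalone walk-through (an assumed simplification; the real bookkeeping lives in DFSNetworkTopology#chooseRandomWithStorageType, and AvailableCountWalkthrough is a hypothetical name) of the scenario the test constructs: choose under /default while excluding the subtree /default/rack1 and the node host2, which sits on the sibling rack /default/rack10.

// Hypothetical walk-through of the availableCount bookkeeping for the test scenario.
public class AvailableCountWalkthrough {

  // Same separator-terminated check as the isNodeInScope helper in the patch.
  static boolean isNodeInScope(String nodeLocation, String scope) {
    if (!scope.endsWith("/")) {
      scope += "/";
    }
    return (nodeLocation + "/").startsWith(scope);
  }

  public static void main(String[] args) {
    String excludedScope = "/default/rack1";
    String host2Location = "/default/rack10";

    // Only host2 lies outside the excluded subtree, so one candidate to start with.
    int availableCount = 1;

    // Excluded nodes already covered by the excluded scope must not be subtracted twice;
    // host2 is NOT under /default/rack1, so with the fixed check it is subtracted here.
    if (!isNodeInScope(host2Location, excludedScope)) {
      availableCount--;
    }

    // Prints 0: no candidate remains, so the method returns null, matching assertNull above.
    // The old startsWith check skipped the subtraction and left a phantom candidate of 1.
    System.out.println("availableCount = " + availableCount);
  }
}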