HDFS-14999. Avoid Potential Infinite Loop in DFSNetworkTopology. Contributed by Ayush Saxena.

(cherry picked from commit c84e6beada)
Authored by Ayush Saxena on 2020-05-18 21:06:46 +05:30; committed by S O'Donnell
parent dc830cf277
commit c93a2f6789
1 changed file with 32 additions and 19 deletions
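The change removes a retry loop that chose a node first and only afterwards consulted excludedNodes: when every node carrying the requested storage type is excluded, no choice can ever pass that check and the loop spins forever. The patch instead threads excludedNodes down through chooseRandomWithStorageTypeAndExcludeRoot and getEligibleChildren, so excluded nodes are filtered out during selection and an exhausted topology simply yields null. A minimal, self-contained sketch of the two shapes, using hypothetical names rather than the HDFS classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Random;
import java.util.Set;

// Hypothetical stand-ins for the topology selection; not the HDFS classes.
public class RetryLoopSketch {

  // Old shape: pick first, check the exclusion list afterwards.
  // If every eligible node is excluded, this never returns.
  static String pickThenCheck(List<String> eligible, Set<String> excluded, Random rand) {
    while (true) {
      String chosen = eligible.get(rand.nextInt(eligible.size()));
      if (!excluded.contains(chosen)) {
        return chosen; // unreachable when excluded covers all of eligible
      }
    }
  }

  // New shape: drop excluded nodes before picking; report exhaustion as null.
  static String filterThenPick(List<String> eligible, Set<String> excluded, Random rand) {
    List<String> candidates = new ArrayList<>(eligible);
    candidates.removeAll(excluded);
    return candidates.isEmpty() ? null : candidates.get(rand.nextInt(candidates.size()));
  }

  public static void main(String[] args) {
    List<String> nodes = Arrays.asList("dn1", "dn2");
    Set<String> excluded = new HashSet<>(nodes); // every node excluded
    System.out.println(filterThenPick(nodes, excluded, new Random())); // prints null
    // pickThenCheck(nodes, excluded, new Random()); // would spin forever
  }
}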


@@ -249,17 +249,10 @@ Node chooseRandomWithStorageType(final String scope,
       return null;
     }
     // to this point, it is guaranteed that there is at least one node
-    // that satisfies the requirement, keep trying until we found one.
-    Node chosen;
-    do {
-      chosen = chooseRandomWithStorageTypeAndExcludeRoot(root, excludeRoot,
-          type);
-      if (excludedNodes == null || !excludedNodes.contains(chosen)) {
-        break;
-      } else {
-        LOG.debug("Node {} is excluded, continuing.", chosen);
-      }
-    } while (true);
+    // that satisfies the requirement.
+    Node chosen =
+        chooseRandomWithStorageTypeAndExcludeRoot(root, excludeRoot, type,
+            excludedNodes);
     LOG.debug("chooseRandom returning {}", chosen);
     return chosen;
   }
@@ -277,23 +270,23 @@ private boolean isNodeInScope(Node node, String scope) {
    * Choose a random node that has the required storage type, under the given
    * root, with an excluded subtree root (could also just be a leaf node).
    *
-   * Note that excludedNode is checked after a random node, so it is not being
-   * handled here.
-   *
    * @param root the root node where we start searching for a datanode
    * @param excludeRoot the root of the subtree what should be excluded
    * @param type the expected storage type
+   * @param excludedNodes the list of nodes to be excluded
    * @return a random datanode, with the storage type, and is not in excluded
    * scope
    */
   private Node chooseRandomWithStorageTypeAndExcludeRoot(
-      DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
+      DFSTopologyNodeImpl root, Node excludeRoot, StorageType type,
+      Collection<Node> excludedNodes) {
     Node chosenNode;
     if (root.isRack()) {
       // children are datanode descriptor
       ArrayList<Node> candidates = new ArrayList<>();
       for (Node node : root.getChildren()) {
-        if (node.equals(excludeRoot)) {
+        if (node.equals(excludeRoot) || (excludedNodes != null && excludedNodes
+            .contains(node))) {
           continue;
         }
         DatanodeDescriptor dnDescriptor = (DatanodeDescriptor)node;
@@ -310,7 +303,7 @@ private Node chooseRandomWithStorageTypeAndExcludeRoot(
     } else {
       // the children are inner nodes
       ArrayList<DFSTopologyNodeImpl> candidates =
-          getEligibleChildren(root, excludeRoot, type);
+          getEligibleChildren(root, excludeRoot, type, excludedNodes);
       if (candidates.size() == 0) {
         return null;
       }
@@ -339,7 +332,7 @@ private Node chooseRandomWithStorageTypeAndExcludeRoot(
       }
       DFSTopologyNodeImpl nextRoot = candidates.get(idxChosen);
       chosenNode = chooseRandomWithStorageTypeAndExcludeRoot(
-          nextRoot, excludeRoot, type);
+          nextRoot, excludeRoot, type, excludedNodes);
     }
     return chosenNode;
   }
@@ -352,11 +345,13 @@ private Node chooseRandomWithStorageTypeAndExcludeRoot(
    * @param root the subtree root we check.
    * @param excludeRoot the root of the subtree that should be excluded.
    * @param type the storage type we look for.
+   * @param excludedNodes the list of excluded nodes.
    * @return a list of possible nodes, each of them is eligible as the next
    * level root we search.
    */
   private ArrayList<DFSTopologyNodeImpl> getEligibleChildren(
-      DFSTopologyNodeImpl root, Node excludeRoot, StorageType type) {
+      DFSTopologyNodeImpl root, Node excludeRoot, StorageType type,
+      Collection<Node> excludedNodes) {
     ArrayList<DFSTopologyNodeImpl> candidates = new ArrayList<>();
     int excludeCount = 0;
     if (excludeRoot != null && root.isAncestor(excludeRoot)) {
@@ -383,6 +378,24 @@ private ArrayList<DFSTopologyNodeImpl> getEligibleChildren(
           (dfsNode.isAncestor(excludeRoot) || dfsNode.equals(excludeRoot))) {
         storageCount -= excludeCount;
       }
+      if (excludedNodes != null) {
+        for (Node excludedNode : excludedNodes) {
+          if (excludeRoot != null && isNodeInScope(excludedNode,
+              NodeBase.getPath(excludeRoot))) {
+            continue;
+          }
+          if (isNodeInScope(excludedNode, NodeBase.getPath(node))) {
+            if (excludedNode instanceof DatanodeDescriptor) {
+              storageCount -=
+                  ((DatanodeDescriptor) excludedNode).hasStorageType(type) ?
+                      1 : 0;
+            } else if (excludedNode instanceof DFSTopologyNodeImpl) {
+              storageCount -= ((DFSTopologyNodeImpl) excludedNode)
+                  .getSubtreeStorageCount(type);
+            }
+          }
+        }
+      }
       if (storageCount > 0) {
         candidates.add(dfsNode);
       }
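The final hunk is what makes the new single call safe: while collecting candidate child subtrees, getEligibleChildren now subtracts the storages contributed by excluded nodes that fall inside each child, so a subtree whose storages are all excluded is never offered as the next level to search and the recursion can end with null instead of retrying. A simplified, hypothetical sketch of that accounting (Subtree and path-prefix containment are stand-ins; the real code also skips excluded nodes already covered by excludeRoot and checks hasStorageType per datanode):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Hypothetical, simplified model of the eligible-children accounting.
public class EligibleChildrenSketch {

  static final class Subtree {
    final String path;      // e.g. "/rack1"
    final int storageCount; // storages of the wanted type under this subtree
    Subtree(String path, int storageCount) {
      this.path = path;
      this.storageCount = storageCount;
    }
  }

  // Keep only subtrees that still have a non-excluded storage of the type.
  static List<Subtree> eligibleChildren(List<Subtree> children, Set<String> excludedPaths) {
    List<Subtree> candidates = new ArrayList<>();
    for (Subtree child : children) {
      int remaining = child.storageCount;
      for (String excluded : excludedPaths) {
        if (excluded.startsWith(child.path + "/")) {
          remaining--; // an excluded datanode inside this child costs one storage
        }
      }
      if (remaining > 0) {
        candidates.add(child);
      }
    }
    return candidates;
  }

  public static void main(String[] args) {
    List<Subtree> racks = Arrays.asList(new Subtree("/rack1", 2), new Subtree("/rack2", 1));
    Set<String> excluded = new HashSet<>(Arrays.asList("/rack2/dn3"));
    // /rack2 is pruned because its only storage belongs to an excluded datanode.
    for (Subtree s : eligibleChildren(racks, excluded)) {
      System.out.println(s.path); // prints /rack1
    }
  }
}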