YARN-6188. Fix OOM issue with decommissioningNodesWatcher in the case of clusters with a large number of nodes (Contributed by Ajay Jadhav via Daniel Templeton)

(cherry picked from commit 585168cc1bdb87398d4f388f3eeccd1c71114982)
Daniel Templeton 2017-02-17 13:11:43 -08:00
parent 06386b7e78
commit 10fd5627bb
1 changed file with 2 additions and 2 deletions

@@ -385,9 +385,9 @@ public class DecommissioningNodesWatcher {
     if (!LOG.isDebugEnabled() || decomNodes.size() == 0) {
       return;
     }
-    StringBuilder sb = new StringBuilder();
     long now = mclock.getTime();
     for (DecommissioningNodeContext d : decomNodes.values()) {
+      StringBuilder sb = new StringBuilder();
       DecommissioningNodeStatus s = checkDecommissioningStatus(d.nodeId);
       sb.append(String.format(
           "%n %-34s %4ds fresh:%3ds containers:%2d %14s",
@@ -413,8 +413,8 @@ public class DecommissioningNodesWatcher {
                 (mclock.getTime() - rmApp.getStartTime()) / 1000));
           }
         }
       }
+      LOG.debug("Decommissioning node: " + sb.toString());
     }
-    LOG.info("Decommissioning Nodes: " + sb.toString());
   }
   // Read possible new DECOMMISSIONING_TIMEOUT_KEY from yarn-site.xml.
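
For context, the patch applies a common logging pattern: instead of accumulating the status of every decommissioning node into one long-lived StringBuilder and emitting a single LOG.info line, each node's status is built in a short-lived per-iteration buffer and emitted as its own debug line, so the retained string data scales with one node rather than with the whole cluster. Below is a minimal, self-contained Java sketch of the two shapes; the Node record, the describe() helper, and the use of java.util.logging are illustrative stand-ins chosen for a runnable example, not the ResourceManager's actual types or logger.

import java.util.List;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

public class PerNodeLoggingSketch {
  private static final Logger LOG =
      Logger.getLogger(PerNodeLoggingSketch.class.getName());

  // Hypothetical stand-in for a decommissioning node's state.
  record Node(String host, int activeContainers) { }

  static String describe(Node n) {
    return String.format("%-34s containers:%2d", n.host(), n.activeContainers());
  }

  // Pre-fix shape: one StringBuilder grows with the number of nodes, so on a
  // cluster with tens of thousands of nodes the buffer (and the single log
  // message built from it) can become large enough to pressure the heap.
  static void logAllNodesInOneMessage(List<Node> nodes) {
    StringBuilder sb = new StringBuilder();
    for (Node n : nodes) {
      sb.append(String.format("%n  %s", describe(n)));
    }
    LOG.info("Decommissioning Nodes: " + sb);
  }

  // Post-fix shape: a short-lived builder per node, logged at debug level, so
  // the memory used per log call is bounded by one node's status string.
  static void logEachNodeSeparately(List<Node> nodes) {
    if (!LOG.isLoggable(Level.FINE) || nodes.isEmpty()) {
      return; // mirrors the early return guarding the debug-only logging
    }
    for (Node n : nodes) {
      StringBuilder sb = new StringBuilder();
      sb.append(String.format("%n  %s", describe(n)));
      LOG.fine("Decommissioning node: " + sb);
    }
  }

  public static void main(String[] args) {
    List<Node> nodes = IntStream.range(0, 5)
        .mapToObj(i -> new Node("host-" + i + ".example.com", i))
        .collect(Collectors.toList());
    logAllNodesInOneMessage(nodes);
    logEachNodeSeparately(nodes);
  }
}

The early return on the debug-enabled check matters in both the sketch and the patched method: when debug logging is disabled, the method exits before building any status strings at all.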