YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
log for overall resource usage by all containers. Contributed by
Naganarasimha G R.

(cherry picked from commit 8badd82ce2)
This commit is contained in:
Devaraj K 2015-05-12 16:54:38 +05:30
parent cda1962996
commit f887243f88
2 changed files with 22 additions and 8 deletions

View File

@ -182,6 +182,9 @@ Release 2.8.0 - UNRELEASED
YARN-3587. Fix the javadoc of DelegationTokenSecretManager in yarn, etc.
projects. (Gabor Liptak via junping_du)
YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
log for overall resource usage by all containers. (Naganarasimha G R via devaraj)
OPTIMIZATIONS
YARN-3339. TestDockerContainerExecutor should pull a single image and not

View File

@ -389,8 +389,10 @@ public class ContainersMonitorImpl extends AbstractService implements
// Now do the monitoring for the trackingContainers
// Check memory usage and kill any overflowing containers
long vmemStillInUsage = 0;  →  long vmemUsageByAllContainers = 0;
long pmemStillInUsage = 0;  →  long pmemByAllContainers = 0;
long cpuUsagePercentPerCoreByAllContainers = 0;
long cpuUsageTotalCoresByAllContainers = 0;
for (Iterator<Map.Entry<ContainerId, ProcessTreeInfo>> it =
    trackingContainers.entrySet().iterator(); it.hasNext();) {
@ -504,6 +506,13 @@ public class ContainersMonitorImpl extends AbstractService implements
containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
}
// Accounting the total memory in usage for all containers
vmemUsageByAllContainers += currentVmemUsage;
pmemByAllContainers += currentPmemUsage;
// Accounting the total cpu usage for all containers
cpuUsagePercentPerCoreByAllContainers += cpuUsagePercentPerCore;
cpuUsageTotalCoresByAllContainers += cpuUsagePercentPerCore;
if (isMemoryOverLimit) {
// Virtual or physical memory over limit. Fail the container and
// remove
@ -520,12 +529,6 @@ public class ContainersMonitorImpl extends AbstractService implements
containerExitStatus, msg));
it.remove();
LOG.info("Removed ProcessTree with root " + pId);
} else {
// Accounting the total memory in usage for all containers that
// are still
// alive and within limits.
vmemStillInUsage += currentVmemUsage;
pmemStillInUsage += currentPmemUsage;
}
} catch (Exception e) {
// Log the exception and proceed to the next container.
@ -533,6 +536,14 @@ public class ContainersMonitorImpl extends AbstractService implements
+ "while managing memory of " + containerId, e);
}
}
if (LOG.isDebugEnabled()) {
LOG.debug("Total Resource Usage stats in NM by all containers : "
+ "Virtual Memory= " + vmemUsageByAllContainers
+ ", Physical Memory= " + pmemByAllContainers
+ ", Total CPU usage= " + cpuUsageTotalCoresByAllContainers
+ ", Total CPU(% per core) usage"
+ cpuUsagePercentPerCoreByAllContainers);
}
try {
Thread.sleep(monitoringInterval);