From f887243f884b137c398bc05f75f457f76c029f16 Mon Sep 17 00:00:00 2001
From: Devaraj K
Date: Tue, 12 May 2015 16:54:38 +0530
Subject: [PATCH] YARN-3513. Remove unused variables in ContainersMonitorImpl
 and add debug log for overall resource usage by all containers. Contributed
 by Naganarasimha G R.

(cherry picked from commit 8badd82ce256e4dc8c234961120d62a88358ab39)
---
 hadoop-yarn-project/CHANGES.txt             |  3 +++
 .../monitor/ContainersMonitorImpl.java      | 27 +++++++++++++------
 2 files changed, 22 insertions(+), 8 deletions(-)

diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt
index 815d35c4575..69acb61ae09 100644
--- a/hadoop-yarn-project/CHANGES.txt
+++ b/hadoop-yarn-project/CHANGES.txt
@@ -182,6 +182,9 @@ Release 2.8.0 - UNRELEASED
     YARN-3587. Fix the javadoc of DelegationTokenSecretManager in yarn, etc.
     projects. (Gabor Liptak via junping_du)
 
+    YARN-3513. Remove unused variables in ContainersMonitorImpl and add debug
+    log for overall resource usage by all containers. (Naganarasimha G R via devaraj)
+
   OPTIMIZATIONS
 
     YARN-3339. TestDockerContainerExecutor should pull a single image and not
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
index 76bbda12f6a..d1e5e01e4e8 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java
@@ -389,8 +389,10 @@ public class ContainersMonitorImpl extends AbstractService implements
         // Now do the monitoring for the trackingContainers
         // Check memory usage and kill any overflowing containers
-        long vmemStillInUsage = 0;
-        long pmemStillInUsage = 0;
+        long vmemUsageByAllContainers = 0;
+        long pmemByAllContainers = 0;
+        long cpuUsagePercentPerCoreByAllContainers = 0;
+        long cpuUsageTotalCoresByAllContainers = 0;
         for (Iterator<Map.Entry<ContainerId, ProcessTreeInfo>> it =
             trackingContainers.entrySet().iterator(); it.hasNext();) {
@@ -504,6 +506,13 @@ public class ContainersMonitorImpl extends AbstractService implements
               containerExitStatus = ContainerExitStatus.KILLED_EXCEEDED_PMEM;
             }
 
+            // Accounting the total memory in usage for all containers
+            vmemUsageByAllContainers += currentVmemUsage;
+            pmemByAllContainers += currentPmemUsage;
+            // Accounting the total cpu usage for all containers
+            cpuUsagePercentPerCoreByAllContainers += cpuUsagePercentPerCore;
+            cpuUsageTotalCoresByAllContainers += cpuUsagePercentPerCore;
+
             if (isMemoryOverLimit) {
               // Virtual or physical memory over limit. Fail the container and
               // remove
@@ -520,12 +529,6 @@ public class ContainersMonitorImpl extends AbstractService implements
                       containerExitStatus, msg));
               it.remove();
               LOG.info("Removed ProcessTree with root " + pId);
-            } else {
-              // Accounting the total memory in usage for all containers that
-              // are still
-              // alive and within limits.
-              vmemStillInUsage += currentVmemUsage;
-              pmemStillInUsage += currentPmemUsage;
             }
           } catch (Exception e) {
             // Log the exception and proceed to the next container.
@@ -533,6 +536,14 @@ public class ContainersMonitorImpl extends AbstractService implements
                 + "while managing memory of " + containerId, e);
           }
         }
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Total Resource Usage stats in NM by all containers : "
+              + "Virtual Memory= " + vmemUsageByAllContainers
+              + ", Physical Memory= " + pmemByAllContainers
+              + ", Total CPU usage= " + cpuUsageTotalCoresByAllContainers
+              + ", Total CPU(% per core) usage"
+              + cpuUsagePercentPerCoreByAllContainers);
+        }
         try {
           Thread.sleep(monitoringInterval);