YARN-4315. NaN in Queue percentage for cluster apps page. (Bibin A Chundatt via wangda)

(cherry picked from commit 561abb9fee)
Wangda Tan 2015-12-29 13:28:00 -08:00
parent 1f8162b74e
commit 3efbde5378
3 changed files with 52 additions and 10 deletions

View File

@@ -1193,6 +1193,8 @@ Release 2.7.3 - UNRELEASED
     YARN-4452. NPE when submit Unmanaged application. (Naganarasimha G R via
     junping_du)
 
+    YARN-4315. NaN in Queue percentage for cluster apps page. (Bibin A Chundatt via wangda)
+
 Release 2.7.2 - UNRELEASED
 
   INCOMPATIBLE CHANGES

View File

@@ -694,17 +694,20 @@ public synchronized ApplicationResourceUsageReport getResourceUsageReport() {
         Resources.clone(attemptResourceUsage.getReserved());
     Resource cluster = rmContext.getScheduler().getClusterResource();
     ResourceCalculator calc = rmContext.getScheduler().getResourceCalculator();
-    float queueUsagePerc = calc.divide(cluster, usedResourceClone, Resources
-        .multiply(cluster, queue.getQueueInfo(false, false).getCapacity()))
-        * 100;
-    float clusterUsagePerc =
-        calc.divide(cluster, usedResourceClone, cluster) * 100;
+    float queueUsagePerc = 0.0f;
+    float clusterUsagePerc = 0.0f;
+    if (!calc.isInvalidDivisor(cluster)) {
+      queueUsagePerc =
+          calc.divide(cluster, usedResourceClone, Resources.multiply(cluster,
+              queue.getQueueInfo(false, false).getCapacity())) * 100;
+      clusterUsagePerc = calc.divide(cluster, usedResourceClone, cluster) * 100;
+    }
     return ApplicationResourceUsageReport.newInstance(liveContainers.size(),
         reservedContainers.size(), usedResourceClone, reservedResourceClone,
         Resources.add(usedResourceClone, reservedResourceClone),
         runningResourceUsage.getMemorySeconds(),
-        runningResourceUsage.getVcoreSeconds(),
-        queueUsagePerc, clusterUsagePerc);
+        runningResourceUsage.getVcoreSeconds(), queueUsagePerc,
+        clusterUsagePerc);
   }
 
   public synchronized Map<ContainerId, RMContainer> getLiveContainersMap() {
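The guard above is the whole fix. As a quick illustration of why it is needed, here is a standalone Java sketch (made-up values and a stand-in for ResourceCalculator#isInvalidDivisor, not the actual YARN classes) showing how an empty cluster turned the percentage math into Infinity/NaN before the change, and how defaulting both percentages to 0.0f behind the divisor check avoids it:

// Standalone illustration only: mirrors the shape of the fix above.
public class QueueUsagePercSketch {

  // Stand-in for the divisor check; a zero-sized cluster is not a valid divisor.
  static boolean isInvalidDivisor(float clusterMemory) {
    return clusterMemory == 0.0f;
  }

  public static void main(String[] args) {
    float usedMemory = 1536f;    // memory held by the app attempt (illustrative)
    float clusterMemory = 0f;    // no NodeManagers registered yet
    float queueCapacity = 0.5f;  // queue capacity as a fraction (illustrative)

    // Pre-fix shape: dividing by a zero-sized cluster gives Infinity,
    // and the 0/0 variants give NaN; either way garbage reached the apps page.
    float unguarded = usedMemory / (clusterMemory * queueCapacity) * 100;
    System.out.println("unguarded: " + unguarded);   // Infinity

    // Post-fix shape: default to 0.0f and divide only when the divisor is valid.
    float queueUsagePerc = 0.0f;
    float clusterUsagePerc = 0.0f;
    if (!isInvalidDivisor(clusterMemory)) {
      queueUsagePerc = usedMemory / (clusterMemory * queueCapacity) * 100;
      clusterUsagePerc = usedMemory / clusterMemory * 100;
    }
    System.out.println("queueUsagePerc: " + queueUsagePerc);     // 0.0
    System.out.println("clusterUsagePerc: " + clusterUsagePerc); // 0.0
  }
}

Running the sketch prints Infinity for the unguarded form and 0.0 for both guarded percentages, which is the behavior the new test below asserts.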

View File

@@ -17,10 +17,9 @@
  */
 package org.apache.hadoop.yarn.server.resourcemanager.scheduler;
 
-import org.apache.hadoop.yarn.api.records.*;
-import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Mockito.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
 
 import java.util.Arrays;
 import java.util.HashMap;
@@ -28,6 +27,17 @@
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.Container;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.NodeId;
+import org.apache.hadoop.yarn.api.records.Priority;
+import org.apache.hadoop.yarn.api.records.QueueInfo;
+import org.apache.hadoop.yarn.api.records.QueueState;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
@@ -212,4 +222,31 @@ public void testAppPercentages() throws Exception {
     assertEquals(60.0f,
         app.getResourceUsageReport().getClusterUsagePercentage(), 0.01f);
   }
+
+  @Test
+  public void testAppPercentagesOnswitch() throws Exception {
+    FifoScheduler scheduler = mock(FifoScheduler.class);
+    when(scheduler.getClusterResource()).thenReturn(Resource.newInstance(0, 0));
+    when(scheduler.getResourceCalculator())
+        .thenReturn(new DefaultResourceCalculator());
+
+    ApplicationAttemptId appAttId = createAppAttemptId(0, 0);
+    RMContext rmContext = mock(RMContext.class);
+    when(rmContext.getEpoch()).thenReturn(3L);
+    when(rmContext.getScheduler()).thenReturn(scheduler);
+
+    final String user = "user1";
+    Queue queue = createQueue("test", null);
+    SchedulerApplicationAttempt app = new SchedulerApplicationAttempt(appAttId,
+        user, queue, queue.getActiveUsersManager(), rmContext);
+
+    // Resource request
+    Resource requestedResource = Resource.newInstance(1536, 2);
+    app.attemptResourceUsage.incUsed(requestedResource);
+
+    assertEquals(0.0f, app.getResourceUsageReport().getQueueUsagePercentage(),
+        0.0f);
+    assertEquals(0.0f, app.getResourceUsageReport().getClusterUsagePercentage(),
+        0.0f);
+  }
 }
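A brief aside on plain Java float semantics (independent of YARN) that explains why the old value was so awkward both on the web UI and in assertions: NaN compares unequal to everything, including itself, and stringifies as "NaN".

// Tiny standalone reminder of NaN behavior; not part of the patch.
public class NaNNote {
  public static void main(String[] args) {
    float nan = 0.0f / 0.0f;              // the kind of value the old report could carry
    System.out.println(nan == nan);       // false: NaN never equals itself
    System.out.println(Float.isNaN(nan)); // true: the only reliable check
    System.out.println(nan);              // prints "NaN", which is what surfaced on the apps page
  }
}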