diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java index 0af8f142aed..d9025b84ea9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryEventHandler.java @@ -689,9 +689,9 @@ public class JobHistoryEventHandler extends AbstractService NormalizedResourceEvent normalizedResourceEvent = (NormalizedResourceEvent) event; if (normalizedResourceEvent.getTaskType() == TaskType.MAP) { - summary.setResourcesPerMap(normalizedResourceEvent.getMemory()); + summary.setResourcesPerMap((int) normalizedResourceEvent.getMemory()); } else if (normalizedResourceEvent.getTaskType() == TaskType.REDUCE) { - summary.setResourcesPerReduce(normalizedResourceEvent.getMemory()); + summary.setResourcesPerReduce((int) normalizedResourceEvent.getMemory()); } break; case JOB_INITED: diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index b4d2da2ea79..99facd485a4 100755 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -1441,7 +1441,7 @@ public abstract class TaskAttemptImpl implements } long duration = (taskAttempt.getFinishTime() - taskAttempt.getLaunchTime()); Resource allocatedResource = taskAttempt.container.getResource(); - int mbAllocated = allocatedResource.getMemory(); + int mbAllocated = (int) allocatedResource.getMemorySize(); int vcoresAllocated = allocatedResource.getVirtualCores(); int minSlotMemSize = taskAttempt.conf.getInt( YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java index 1a7d58f3e3f..e4384b3bcd8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java @@ -365,10 +365,10 @@ public class RMContainerAllocator extends RMContainerRequestor eventHandler.handle(new JobHistoryEvent(jobId, new NormalizedResourceEvent( org.apache.hadoop.mapreduce.TaskType.MAP, mapResourceRequest - .getMemory()))); + .getMemorySize()))); LOG.info("mapResourceRequest:" + mapResourceRequest); - if (mapResourceRequest.getMemory() > supportedMaxContainerCapability 
- .getMemory() + if (mapResourceRequest.getMemorySize() > supportedMaxContainerCapability + .getMemorySize() || mapResourceRequest.getVirtualCores() > supportedMaxContainerCapability .getVirtualCores()) { String diagMsg = @@ -382,7 +382,7 @@ public class RMContainerAllocator extends RMContainerRequestor } } // set the resources - reqEvent.getCapability().setMemory(mapResourceRequest.getMemory()); + reqEvent.getCapability().setMemory(mapResourceRequest.getMemorySize()); reqEvent.getCapability().setVirtualCores( mapResourceRequest.getVirtualCores()); scheduledRequests.addMap(reqEvent);//maps are immediately scheduled @@ -392,10 +392,10 @@ public class RMContainerAllocator extends RMContainerRequestor eventHandler.handle(new JobHistoryEvent(jobId, new NormalizedResourceEvent( org.apache.hadoop.mapreduce.TaskType.REDUCE, - reduceResourceRequest.getMemory()))); + reduceResourceRequest.getMemorySize()))); LOG.info("reduceResourceRequest:" + reduceResourceRequest); - if (reduceResourceRequest.getMemory() > supportedMaxContainerCapability - .getMemory() + if (reduceResourceRequest.getMemorySize() > supportedMaxContainerCapability + .getMemorySize() || reduceResourceRequest.getVirtualCores() > supportedMaxContainerCapability .getVirtualCores()) { String diagMsg = @@ -410,7 +410,7 @@ public class RMContainerAllocator extends RMContainerRequestor } } // set the resources - reqEvent.getCapability().setMemory(reduceResourceRequest.getMemory()); + reqEvent.getCapability().setMemory(reduceResourceRequest.getMemorySize()); reqEvent.getCapability().setVirtualCores( reduceResourceRequest.getVirtualCores()); if (reqEvent.getEarlierAttemptFailed()) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java index 39cb22ebaf1..4748dfdf00c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/ResourceCalculatorUtils.java @@ -20,43 +20,42 @@ package org.apache.hadoop.mapreduce.v2.app.rm; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SchedulerResourceTypes; -import org.apache.hadoop.yarn.util.Records; import java.util.EnumSet; public class ResourceCalculatorUtils { - public static int divideAndCeil(int a, int b) { + public static int divideAndCeil(long a, long b) { if (b == 0) { return 0; } - return (a + (b - 1)) / b; + return (int) ((a + (b - 1)) / b); } public static int computeAvailableContainers(Resource available, Resource required, EnumSet resourceTypes) { if (resourceTypes.contains(SchedulerResourceTypes.CPU)) { return Math.min( - calculateRatioOrMaxValue(available.getMemory(), required.getMemory()), + calculateRatioOrMaxValue(available.getMemorySize(), required.getMemorySize()), calculateRatioOrMaxValue(available.getVirtualCores(), required .getVirtualCores())); } return calculateRatioOrMaxValue( - available.getMemory(), required.getMemory()); + available.getMemorySize(), required.getMemorySize()); } public static int divideAndCeilContainers(Resource required, Resource factor, EnumSet resourceTypes) { if 
(resourceTypes.contains(SchedulerResourceTypes.CPU)) { - return Math.max(divideAndCeil(required.getMemory(), factor.getMemory()), + return Math.max(divideAndCeil(required.getMemorySize(), factor.getMemorySize()), divideAndCeil(required.getVirtualCores(), factor.getVirtualCores())); } - return divideAndCeil(required.getMemory(), factor.getMemory()); + return divideAndCeil(required.getMemorySize(), factor.getMemorySize()); } - private static int calculateRatioOrMaxValue(int numerator, int denominator) { + private static int calculateRatioOrMaxValue(long numerator, long denominator) { if (denominator == 0) { return Integer.MAX_VALUE; } - return numerator / denominator; + return (int) (numerator / denominator); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java index 0f112381810..4c6ee7284f2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java @@ -1794,7 +1794,7 @@ public class TestRecovery { int appAttemptId = 3; MRAppMetrics metrics = mock(MRAppMetrics.class); Resource minContainerRequirements = mock(Resource.class); - when(minContainerRequirements.getMemory()).thenReturn(1000); + when(minContainerRequirements.getMemorySize()).thenReturn(1000L); ClusterInfo clusterInfo = mock(ClusterInfo.class); AppContext appContext = mock(AppContext.class); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java index 98dffba4580..1af60d85bdf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java @@ -33,12 +33,6 @@ import java.util.HashMap; import java.util.Iterator; import java.util.Map; -import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent; -import org.apache.hadoop.mapreduce.v2.app.client.ClientService; -import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator; -import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent; -import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; -import org.apache.hadoop.yarn.util.resource.Resources; import org.junit.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileStatus; @@ -47,11 +41,9 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RawLocalFileSystem; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapTaskAttemptImpl; -import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobCounter; import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; 
import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion; import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo; @@ -94,7 +86,6 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; -import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Event; @@ -313,7 +304,7 @@ public class TestTaskAttempt{ Assert.assertEquals(rta.getLaunchTime(), 10); Counters counters = job.getAllCounters(); - int memoryMb = containerResource.getMemory(); + int memoryMb = (int) containerResource.getMemorySize(); int vcores = containerResource.getVirtualCores(); Assert.assertEquals((int) Math.ceil((float) memoryMb / minContainerSize), counters.findCounter(JobCounter.SLOTS_MILLIS_MAPS).getValue()); @@ -577,7 +568,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx); TaskAttemptImpl taImpl = @@ -635,7 +626,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx); TaskAttemptImpl taImpl = @@ -699,7 +690,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx); TaskAttemptImpl taImpl = @@ -769,7 +760,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx); TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, @@ -826,7 +817,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx); TaskAttemptImpl taImpl = @@ -894,7 +885,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); setupTaskAttemptFinishingMonitor(eventHandler, jobConf, appCtx); TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, @@ -1054,7 +1045,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource 
resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), @@ -1108,7 +1099,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), @@ -1165,7 +1156,7 @@ public class TestTaskAttempt{ ClusterInfo clusterInfo = mock(ClusterInfo.class); Resource resource = mock(Resource.class); when(appCtx.getClusterInfo()).thenReturn(clusterInfo); - when(resource.getMemory()).thenReturn(1024); + when(resource.getMemorySize()).thenReturn(1024L); TaskAttemptImpl taImpl = new MapTaskAttemptImpl(taskId, 1, eventHandler, jobFile, 1, splits, jobConf, taListener, new Token(), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java index 0f7dc87f1ba..f9e4595b7f9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java @@ -201,7 +201,7 @@ public class TestLocalContainerAllocator { Container container = containerAssignedCaptor.getValue().getContainer(); Resource containerResource = container.getResource(); Assert.assertNotNull(containerResource); - Assert.assertEquals(containerResource.getMemory(), 0); + Assert.assertEquals(containerResource.getMemorySize(), 0); Assert.assertEquals(containerResource.getVirtualCores(), 0); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java index bc901903cbc..474e119a118 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestRMContainerAllocator.java @@ -1771,7 +1771,7 @@ public class TestRMContainerAllocator { when(excessC.getId()).thenReturn(containerId); when(excessC.getPriority()).thenReturn(RMContainerAllocator.PRIORITY_REDUCE); Resource mockR = mock(Resource.class); - when(mockR.getMemory()).thenReturn(2048); + when(mockR.getMemorySize()).thenReturn(2048L); when(excessC.getResource()).thenReturn(mockR); NodeId nId = mock(NodeId.class); when(nId.getHost()).thenReturn("local"); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java index d87f6db6d61..cab8f544416 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/rm/TestResourceCalculatorUtils.java @@ -47,8 +47,8 @@ public class TestResourceCalculatorUtils { Integer.MAX_VALUE, expectedNumberOfContainersForCPU); - Resource zeroCpuResource = Resource.newInstance(nonZeroResource.getMemory(), - 0); + Resource zeroCpuResource = Resource.newInstance( + nonZeroResource.getMemorySize(), 0); verifyDifferentResourceTypes(clusterAvailableResources, zeroCpuResource, expectedNumberOfContainersForMemory, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java index d895c3b3b86..ca38babbe4f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java @@ -521,13 +521,13 @@ public class TypeConverter { application.getApplicationResourceUsageReport(); if (resourceUsageReport != null) { jobStatus.setNeededMem( - resourceUsageReport.getNeededResources().getMemory()); + resourceUsageReport.getNeededResources().getMemorySize()); jobStatus.setNumReservedSlots( resourceUsageReport.getNumReservedContainers()); jobStatus.setNumUsedSlots(resourceUsageReport.getNumUsedContainers()); jobStatus.setReservedMem( - resourceUsageReport.getReservedResources().getMemory()); - jobStatus.setUsedMem(resourceUsageReport.getUsedResources().getMemory()); + resourceUsageReport.getReservedResources().getMemorySize()); + jobStatus.setUsedMem(resourceUsageReport.getUsedResources().getMemorySize()); } return jobStatus; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java index 9ff75b9801b..7438296d657 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java @@ -95,9 +95,9 @@ public class JobStatus implements Writable, Cloneable { private String trackingUrl =""; private int numUsedSlots; private int numReservedSlots; - private int usedMem; - private int reservedMem; - private int neededMem; + private long usedMem; + private long reservedMem; + private long neededMem; private boolean isUber; /** @@ -580,42 +580,42 @@ public class JobStatus implements Writable, Cloneable { /** * @return the used memory */ - public 
int getUsedMem() { + public long getUsedMem() { return usedMem; } /** * @param m the used memory */ - public void setUsedMem(int m) { + public void setUsedMem(long m) { this.usedMem = m; } /** * @return the reserved memory */ - public int getReservedMem() { + public long getReservedMem() { return reservedMem; } /** * @param r the reserved memory */ - public void setReservedMem(int r) { + public void setReservedMem(long r) { this.reservedMem = r; } /** * @return the needed memory */ - public int getNeededMem() { + public long getNeededMem() { return neededMem; } /** * @param n the needed memory */ - public void setNeededMem(int n) { + public void setNeededMem(long n) { this.neededMem = n; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java index b8f049c0775..eead9cf03df 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java @@ -28,7 +28,7 @@ import org.apache.hadoop.mapreduce.TaskType; @InterfaceAudience.Private @InterfaceStability.Unstable public class NormalizedResourceEvent implements HistoryEvent { - private int memory; + private long memory; private TaskType taskType; /** @@ -36,7 +36,7 @@ public class NormalizedResourceEvent implements HistoryEvent { * @param taskType the tasktype of the request. * @param memory the normalized memory requirements. 
*/ - public NormalizedResourceEvent(TaskType taskType, int memory) { + public NormalizedResourceEvent(TaskType taskType, long memory) { this.memory = memory; this.taskType = taskType; } @@ -53,7 +53,7 @@ public class NormalizedResourceEvent implements HistoryEvent { * the normalized memory * @return the normalized memory */ - public int getMemory() { + public long getMemory() { return this.memory; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java index a879098264f..3ae59ede18d 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java @@ -779,9 +779,10 @@ public class CLI extends Configured implements Tool { for (JobStatus job : jobs) { int numUsedSlots = job.getNumUsedSlots(); int numReservedSlots = job.getNumReservedSlots(); - int usedMem = job.getUsedMem(); - int rsvdMem = job.getReservedMem(); - int neededMem = job.getNeededMem(); + + long usedMem = job.getUsedMem(); + long rsvdMem = job.getReservedMem(); + long neededMem = job.getNeededMem(); writer.printf(dataPattern, job.getJobID().toString(), job.getState(), job.getStartTime(), job.getUsername(), job.getQueue(), diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java index 84b76bfbcf6..4895a5b036b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/JobClientUnitTest.java @@ -170,9 +170,9 @@ public class JobClientUnitTest { when(mockJobStatus.getPriority()).thenReturn(JobPriority.NORMAL); when(mockJobStatus.getNumUsedSlots()).thenReturn(1); when(mockJobStatus.getNumReservedSlots()).thenReturn(1); - when(mockJobStatus.getUsedMem()).thenReturn(1024); - when(mockJobStatus.getReservedMem()).thenReturn(512); - when(mockJobStatus.getNeededMem()).thenReturn(2048); + when(mockJobStatus.getUsedMem()).thenReturn(1024L); + when(mockJobStatus.getReservedMem()).thenReturn(512L); + when(mockJobStatus.getNeededMem()).thenReturn(2048L); when(mockJobStatus.getSchedulingInfo()).thenReturn("NA"); Job mockJob = mock(Job.class); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java index d100e1de583..3b539fa6be4 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/FairSchedulerMetrics.java @@ -67,10 +67,10 @@ public class FairSchedulerMetrics extends SchedulerMetrics { FairScheduler fair = (FairScheduler) scheduler; final FSAppAttempt app = fair.getSchedulerApp(appAttemptId); metrics.register("variable.app." 
+ oldAppId + ".demand.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return app.getDemand().getMemory(); + public Long getValue() { + return app.getDemand().getMemorySize(); } } ); @@ -83,10 +83,10 @@ public class FairSchedulerMetrics extends SchedulerMetrics { } ); metrics.register("variable.app." + oldAppId + ".usage.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return app.getResourceUsage().getMemory(); + public Long getValue() { + return app.getResourceUsage().getMemorySize(); } } ); @@ -99,26 +99,26 @@ public class FairSchedulerMetrics extends SchedulerMetrics { } ); metrics.register("variable.app." + oldAppId + ".minshare.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return app.getMinShare().getMemory(); + public Long getValue() { + return app.getMinShare().getMemorySize(); } } ); metrics.register("variable.app." + oldAppId + ".minshare.vcores", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return app.getMinShare().getMemory(); + public Long getValue() { + return app.getMinShare().getMemorySize(); } } ); metrics.register("variable.app." + oldAppId + ".maxshare.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return Math.min(app.getMaxShare().getMemory(), totalMemoryMB); + public Long getValue() { + return Math.min(app.getMaxShare().getMemorySize(), totalMemoryMB); } } ); @@ -154,10 +154,10 @@ public class FairSchedulerMetrics extends SchedulerMetrics { FairScheduler fair = (FairScheduler) scheduler; final FSQueue queue = fair.getQueueManager().getQueue(queueName); metrics.register("variable.queue." + queueName + ".demand.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return queue.getDemand().getMemory(); + public Long getValue() { + return queue.getDemand().getMemorySize(); } } ); @@ -170,10 +170,10 @@ public class FairSchedulerMetrics extends SchedulerMetrics { } ); metrics.register("variable.queue." + queueName + ".usage.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return queue.getResourceUsage().getMemory(); + public Long getValue() { + return queue.getResourceUsage().getMemorySize(); } } ); @@ -186,10 +186,10 @@ public class FairSchedulerMetrics extends SchedulerMetrics { } ); metrics.register("variable.queue." + queueName + ".minshare.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return queue.getMinShare().getMemory(); + public Long getValue() { + return queue.getMinShare().getMemorySize(); } } ); @@ -202,9 +202,9 @@ public class FairSchedulerMetrics extends SchedulerMetrics { } ); metrics.register("variable.queue." + queueName + ".maxshare.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { + public Long getValue() { if (! maxReset && SLSRunner.simulateInfoMap.containsKey("Number of nodes") && SLSRunner.simulateInfoMap.containsKey("Node memory (MB)") && @@ -221,7 +221,7 @@ public class FairSchedulerMetrics extends SchedulerMetrics { maxReset = false; } - return Math.min(queue.getMaxShare().getMemory(), totalMemoryMB); + return Math.min(queue.getMaxShare().getMemorySize(), totalMemoryMB); } } ); @@ -234,10 +234,10 @@ public class FairSchedulerMetrics extends SchedulerMetrics { } ); metrics.register("variable.queue." 
+ queueName + ".fairshare.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { - return queue.getFairShare().getMemory(); + public Long getValue() { + return queue.getFairShare().getMemorySize(); } } ); diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java index 8835deb7efa..1a936abb468 100644 --- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java +++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/scheduler/ResourceSchedulerWrapper.java @@ -267,7 +267,7 @@ public class ResourceSchedulerWrapper // should have one container which is AM container RMContainer rmc = app.getLiveContainers().iterator().next(); updateQueueMetrics(queue, - rmc.getContainer().getResource().getMemory(), + rmc.getContainer().getResource().getMemorySize(), rmc.getContainer().getResource().getVirtualCores()); } } @@ -323,7 +323,7 @@ public class ResourceSchedulerWrapper if (status.getExitStatus() == ContainerExitStatus.SUCCESS) { for (RMContainer rmc : app.getLiveContainers()) { if (rmc.getContainerId() == containerId) { - releasedMemory += rmc.getContainer().getResource().getMemory(); + releasedMemory += rmc.getContainer().getResource().getMemorySize(); releasedVCores += rmc.getContainer() .getResource().getVirtualCores(); break; @@ -332,7 +332,7 @@ public class ResourceSchedulerWrapper } else if (status.getExitStatus() == ContainerExitStatus.ABORTED) { if (preemptionContainerMap.containsKey(containerId)) { Resource preResource = preemptionContainerMap.get(containerId); - releasedMemory += preResource.getMemory(); + releasedMemory += preResource.getMemorySize(); releasedVCores += preResource.getVirtualCores(); preemptionContainerMap.remove(containerId); } @@ -423,9 +423,9 @@ public class ResourceSchedulerWrapper "counter.queue." + queueName + ".pending.cores", "counter.queue." + queueName + ".allocated.memory", "counter.queue." + queueName + ".allocated.cores"}; - int values[] = new int[]{pendingResource.getMemory(), + long values[] = new long[]{pendingResource.getMemorySize(), pendingResource.getVirtualCores(), - allocatedResource.getMemory(), allocatedResource.getVirtualCores()}; + allocatedResource.getMemorySize(), allocatedResource.getVirtualCores()}; for (int i = names.length - 1; i >= 0; i --) { if (! 
counterMap.containsKey(names[i])) { metrics.counter(names[i]); @@ -531,11 +531,11 @@ public class ResourceSchedulerWrapper private void registerClusterResourceMetrics() { metrics.register("variable.cluster.allocated.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { + public Long getValue() { if(scheduler == null || scheduler.getRootQueueMetrics() == null) { - return 0; + return 0L; } else { return scheduler.getRootQueueMetrics().getAllocatedMB(); } @@ -543,11 +543,11 @@ public class ResourceSchedulerWrapper } ); metrics.register("variable.cluster.allocated.vcores", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { + public Long getValue() { if(scheduler == null || scheduler.getRootQueueMetrics() == null) { - return 0; + return 0L; } else { return scheduler.getRootQueueMetrics().getAllocatedVirtualCores(); } @@ -555,11 +555,11 @@ public class ResourceSchedulerWrapper } ); metrics.register("variable.cluster.available.memory", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { + public Long getValue() { if(scheduler == null || scheduler.getRootQueueMetrics() == null) { - return 0; + return 0L; } else { return scheduler.getRootQueueMetrics().getAvailableMB(); } @@ -567,11 +567,11 @@ public class ResourceSchedulerWrapper } ); metrics.register("variable.cluster.available.vcores", - new Gauge() { + new Gauge() { @Override - public Integer getValue() { + public Long getValue() { if(scheduler == null || scheduler.getRootQueueMetrics() == null) { - return 0; + return 0L; } else { return scheduler.getRootQueueMetrics().getAvailableVirtualCores(); } @@ -749,7 +749,7 @@ public class ResourceSchedulerWrapper } private void updateQueueMetrics(String queue, - int releasedMemory, int releasedVCores) { + long releasedMemory, int releasedVCores) { // update queue counters SortedMap counterMap = metrics.getCounters(); if (releasedMemory != 0) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index 88b57f1c68f..e2d847284a1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -18,7 +18,9 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.classification.InterfaceStability.Evolving; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.ApplicationMasterProtocol; @@ -53,7 +55,7 @@ public abstract class Resource implements Comparable { @Public @Stable - public static Resource newInstance(int memory, int vCores) { + public static Resource newInstance(long memory, long vCores) { Resource resource = Records.newRecord(Resource.class); resource.setMemory(memory); resource.setVirtualCores(vCores); @@ -61,12 +63,23 @@ public abstract class Resource implements Comparable { } /** + * This method is DEPRECATED: + * Use {@link Resource#getMemorySize()} instead + * * Get memory of the resource. 
* @return memory of the resource */ @Public - @Stable + @Deprecated public abstract int getMemory(); + + /** + * Get memory of the resource. + * @return memory of the resource + */ + @Private + @Unstable + public abstract long getMemorySize(); /** * Set memory of the resource. @@ -74,7 +87,7 @@ public abstract class Resource implements Comparable<Resource> { */ @Public @Stable - public abstract void setMemory(int memory); + public abstract void setMemory(long memory); /** @@ -90,6 +103,10 @@ public abstract class Resource implements Comparable<Resource> { @Public @Evolving public abstract int getVirtualCores(); + + @Public + @Unstable + public abstract long getVirtualCoresSize(); /** * Set number of virtual cpu cores of the resource. @@ -103,13 +120,14 @@ public abstract class Resource implements Comparable<Resource> { */ @Public @Evolving - public abstract void setVirtualCores(int vCores); + public abstract void setVirtualCores(long vCores); @Override public int hashCode() { final int prime = 263167; - int result = 3571; - result = 939769357 + getMemory(); // prime * result = 939769357 initially + + int result = (int) (939769357 + + getMemorySize()); // prime * result = 939769357 initially result = prime * result + getVirtualCores(); return result; } @@ -123,7 +141,7 @@ public abstract class Resource implements Comparable<Resource> { if (!(obj instanceof Resource)) return false; Resource other = (Resource) obj; - if (getMemory() != other.getMemory() || + if (getMemorySize() != other.getMemorySize() || getVirtualCores() != other.getVirtualCores()) { return false; } @@ -132,6 +150,6 @@ public abstract class Resource implements Comparable<Resource> { @Override public String toString() { - return "<memory:" + getMemory() + ", vCores:" + getVirtualCores() + ">"; + return "<memory:" + getMemorySize() + ", vCores:" + getVirtualCores() + ">"; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 883aae93fce..81fae5820b5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -54,8 +54,8 @@ message ContainerIdProto { } message ResourceProto { - optional int32 memory = 1; - optional int32 virtual_cores = 2; + optional int64 memory = 1; + optional int64 virtual_cores = 2; } message ResourceUtilizationProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index e72adf534ee..5e2c90b4291 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -224,7 +224,7 @@ public class ApplicationMaster { @VisibleForTesting protected int numTotalContainers = 1; // Memory to request for the container on which the shell command will run - private int containerMemory = 10; + private long containerMemory = 10; // VirtualCores to request for the container on which the shell command will run private int containerVirtualCores = 1; // Priority of the request @@
-631,7 +631,7 @@ public class ApplicationMaster { appMasterTrackingUrl); // Dump out information about cluster capability as seen by the // resource manager - int maxMem = response.getMaximumResourceCapability().getMemory(); + long maxMem = response.getMaximumResourceCapability().getMemorySize(); LOG.info("Max mem capability of resources in this cluster " + maxMem); int maxVCores = response.getMaximumResourceCapability().getVirtualCores(); @@ -861,7 +861,7 @@ public class ApplicationMaster { + ":" + allocatedContainer.getNodeId().getPort() + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() + ", containerResourceMemory" - + allocatedContainer.getResource().getMemory() + + allocatedContainer.getResource().getMemorySize() + ", containerResourceVirtualCores" + allocatedContainer.getResource().getVirtualCores()); // + ", containerToken" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index 9139b08e530..5adc37d825c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -127,7 +127,7 @@ public class Client { // Queue for App master private String amQueue = ""; // Amt. of memory resource to request for to run the App Master - private int amMemory = 10; + private long amMemory = 10; // Amt. of virtual core resource to request for to run the App Master private int amVCores = 1; @@ -520,7 +520,7 @@ public class Client { // the required resources from the RM for the app master // Memory ask has to be a multiple of min and less than max. // Dump out information about cluster capability as seen by the resource manager - int maxMem = appResponse.getMaximumResourceCapability().getMemory(); + long maxMem = appResponse.getMaximumResourceCapability().getMemorySize(); LOG.info("Max mem capability of resources in this cluster " + maxMem); // A resource ask cannot exceed the max. 
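
For reference, a minimal standalone sketch (not part of the patch) of the clamping pattern the distributed-shell Client and ApplicationMaster apply above, assuming only the getMemorySize() accessor this patch introduces; the class and method names are illustrative.

import org.apache.hadoop.yarn.api.records.Resource;

// Illustrative only: clamp a requested memory ask (in MB) against the
// cluster maximum, reading the capability as a long via the new
// getMemorySize() accessor instead of the deprecated int getMemory().
public final class MemoryClampSketch {
  static long clampMemoryAsk(long requestedMb, Resource maxCapability) {
    long maxMem = maxCapability.getMemorySize();
    // A resource ask cannot exceed the cluster maximum.
    return Math.min(requestedMb, maxMem);
  }
}
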
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java index 46ddc4dead7..4366c25baf2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/api/impl/AMRMClientImpl.java @@ -122,10 +122,10 @@ public class AMRMClientImpl extends AMRMClient { class ResourceReverseMemoryThenCpuComparator implements Comparator { @Override public int compare(Resource arg0, Resource arg1) { - int mem0 = arg0.getMemory(); - int mem1 = arg1.getMemory(); - int cpu0 = arg0.getVirtualCores(); - int cpu1 = arg1.getVirtualCores(); + long mem0 = arg0.getMemorySize(); + long mem1 = arg1.getMemorySize(); + long cpu0 = arg0.getVirtualCores(); + long cpu1 = arg1.getVirtualCores(); if(mem0 == mem1) { if(cpu0 == cpu1) { return 0; @@ -143,10 +143,10 @@ public class AMRMClientImpl extends AMRMClient { } static boolean canFit(Resource arg0, Resource arg1) { - int mem0 = arg0.getMemory(); - int mem1 = arg1.getMemory(); - int cpu0 = arg0.getVirtualCores(); - int cpu1 = arg1.getVirtualCores(); + long mem0 = arg0.getMemorySize(); + long mem1 = arg1.getMemorySize(); + long cpu0 = arg0.getVirtualCores(); + long cpu1 = arg1.getVirtualCores(); return (mem0 <= mem1 && cpu0 <= cpu1); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java index c1128572650..a89551f9c51 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/NodeCLI.java @@ -275,9 +275,9 @@ public class NodeCLI extends YarnCLI { nodeReportStr.println(nodeReport.getNumContainers()); nodeReportStr.print("\tMemory-Used : "); nodeReportStr.println((nodeReport.getUsed() == null) ? "0MB" - : (nodeReport.getUsed().getMemory() + "MB")); + : (nodeReport.getUsed().getMemorySize() + "MB")); nodeReportStr.print("\tMemory-Capacity : "); - nodeReportStr.println(nodeReport.getCapability().getMemory() + "MB"); + nodeReportStr.println(nodeReport.getCapability().getMemorySize() + "MB"); nodeReportStr.print("\tCPU-Used : "); nodeReportStr.println((nodeReport.getUsed() == null) ? 
"0 vcores" : (nodeReport.getUsed().getVirtualCores() + " vcores")); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java index 8c9f1d9ce35..2e51f492447 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/cli/TopCLI.java @@ -158,7 +158,7 @@ public class TopCLI extends YarnCLI { displayStringsMap.put(Columns.VCORES, String.valueOf(usedVirtualCores)); usedMemory = appReport.getApplicationResourceUsageReport().getUsedResources() - .getMemory() / 1024; + .getMemorySize() / 1024; displayStringsMap.put(Columns.MEM, String.valueOf(usedMemory) + "G"); reservedVirtualCores = appReport.getApplicationResourceUsageReport().getReservedResources() @@ -167,7 +167,7 @@ public class TopCLI extends YarnCLI { String.valueOf(reservedVirtualCores)); reservedMemory = appReport.getApplicationResourceUsageReport().getReservedResources() - .getMemory() / 1024; + .getMemorySize() / 1024; displayStringsMap.put(Columns.RMEM, String.valueOf(reservedMemory) + "G"); attempts = appReport.getCurrentApplicationAttemptId().getAttemptId(); nodes = 0; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java index df1b96b427a..9ce73887859 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestYarnClient.java @@ -1209,7 +1209,7 @@ public class TestYarnClient { for (attempts = 10; attempts > 0; attempts--) { if (cluster.getResourceManager().getRMContext().getReservationSystem() .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity() - .getMemory() > 6000) { + .getMemorySize() > 6000) { break; } try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java index a28c6ed3305..937941ae548 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ResourcePBImpl.java @@ -53,39 +53,49 @@ public class ResourcePBImpl extends Resource { } viaProto = false; } - - + @Override + @SuppressWarnings("deprecation") public int getMemory() { - ResourceProtoOrBuilder p = viaProto ? proto : builder; - return (p.getMemory()); + return (int) getMemorySize(); } @Override - public void setMemory(int memory) { + public long getMemorySize() { + ResourceProtoOrBuilder p = viaProto ? proto : builder; + return p.getMemory(); + } + + @Override + public void setMemory(long memory) { maybeInitBuilder(); - builder.setMemory((memory)); + builder.setMemory(memory); } @Override public int getVirtualCores() { - ResourceProtoOrBuilder p = viaProto ? 
proto : builder; - return (p.getVirtualCores()); + return (int) getVirtualCoresSize(); } @Override - public void setVirtualCores(int vCores) { + public long getVirtualCoresSize() { + ResourceProtoOrBuilder p = viaProto ? proto : builder; + return p.getVirtualCores(); + } + + @Override + public void setVirtualCores(long vCores) { maybeInitBuilder(); - builder.setVirtualCores((vCores)); + builder.setVirtualCores(vCores); } @Override public int compareTo(Resource other) { - int diff = this.getMemory() - other.getMemory(); + long diff = this.getMemorySize() - other.getMemorySize(); if (diff == 0) { diff = this.getVirtualCores() - other.getVirtualCores(); } - return diff; + return diff == 0 ? 0 : (diff > 0 ? 1 : -1); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java index 2fdf214d2ae..b98cf0c518c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DefaultResourceCalculator.java @@ -28,13 +28,13 @@ public class DefaultResourceCalculator extends ResourceCalculator { @Override public int compare(Resource unused, Resource lhs, Resource rhs) { // Only consider memory - return lhs.getMemory() - rhs.getMemory(); + return Long.compare(lhs.getMemorySize(), rhs.getMemorySize()); } @Override - public int computeAvailableContainers(Resource available, Resource required) { + public long computeAvailableContainers(Resource available, Resource required) { // Only consider memory - return available.getMemory() / required.getMemory(); + return available.getMemorySize() / required.getMemorySize(); } @Override @@ -44,7 +44,7 @@ public class DefaultResourceCalculator extends ResourceCalculator { } public boolean isInvalidDivisor(Resource r) { - if (r.getMemory() == 0.0f) { + if (r.getMemorySize() == 0.0f) { return true; } return false; @@ -52,23 +52,23 @@ public class DefaultResourceCalculator extends ResourceCalculator { @Override public float ratio(Resource a, Resource b) { - return (float)a.getMemory() / b.getMemory(); + return (float)a.getMemorySize() / b.getMemorySize(); } @Override - public Resource divideAndCeil(Resource numerator, int denominator) { + public Resource divideAndCeil(Resource numerator, long denominator) { return Resources.createResource( - divideAndCeil(numerator.getMemory(), denominator)); + divideAndCeil(numerator.getMemorySize(), denominator)); } @Override public Resource normalize(Resource r, Resource minimumResource, Resource maximumResource, Resource stepFactor) { - int normalizedMemory = Math.min( + long normalizedMemory = Math.min( roundUp( - Math.max(r.getMemory(), minimumResource.getMemory()), - stepFactor.getMemory()), - maximumResource.getMemory()); + Math.max(r.getMemorySize(), minimumResource.getMemorySize()), + stepFactor.getMemorySize()), + maximumResource.getMemorySize()); return Resources.createResource(normalizedMemory); } @@ -81,22 +81,22 @@ public class DefaultResourceCalculator extends ResourceCalculator { @Override public Resource roundUp(Resource r, Resource stepFactor) { return Resources.createResource( - roundUp(r.getMemory(), stepFactor.getMemory()) + roundUp(r.getMemorySize(), stepFactor.getMemorySize()) ); } @Override public Resource 
roundDown(Resource r, Resource stepFactor) { return Resources.createResource( - roundDown(r.getMemory(), stepFactor.getMemory())); + roundDown(r.getMemorySize(), stepFactor.getMemorySize())); } @Override public Resource multiplyAndNormalizeUp(Resource r, double by, Resource stepFactor) { return Resources.createResource( - roundUp((int)(r.getMemory() * by + 0.5), stepFactor.getMemory()) - ); + roundUp((long) (r.getMemorySize() * by + 0.5), + stepFactor.getMemorySize())); } @Override @@ -104,8 +104,8 @@ public class DefaultResourceCalculator extends ResourceCalculator { Resource stepFactor) { return Resources.createResource( roundDown( - (int)(r.getMemory() * by), - stepFactor.getMemory() + (long)(r.getMemorySize() * by), + stepFactor.getMemorySize() ) ); } @@ -113,6 +113,6 @@ public class DefaultResourceCalculator extends ResourceCalculator { @Override public boolean fitsIn(Resource cluster, Resource smaller, Resource bigger) { - return smaller.getMemory() <= bigger.getMemory(); + return smaller.getMemorySize() <= bigger.getMemorySize(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java index b5c996766ff..8f97fab0323 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/DominantResourceCalculator.java @@ -54,15 +54,15 @@ public class DominantResourceCalculator extends ResourceCalculator { } if (isInvalidDivisor(clusterResource)) { - if ((lhs.getMemory() < rhs.getMemory() && lhs.getVirtualCores() > rhs + if ((lhs.getMemorySize() < rhs.getMemorySize() && lhs.getVirtualCores() > rhs .getVirtualCores()) - || (lhs.getMemory() > rhs.getMemory() && lhs.getVirtualCores() < rhs + || (lhs.getMemorySize() > rhs.getMemorySize() && lhs.getVirtualCores() < rhs .getVirtualCores())) { return 0; - } else if (lhs.getMemory() > rhs.getMemory() + } else if (lhs.getMemorySize() > rhs.getMemorySize() || lhs.getVirtualCores() > rhs.getVirtualCores()) { return 1; - } else if (lhs.getMemory() < rhs.getMemory() + } else if (lhs.getMemorySize() < rhs.getMemorySize() || lhs.getVirtualCores() < rhs.getVirtualCores()) { return -1; } @@ -100,20 +100,20 @@ public class DominantResourceCalculator extends ResourceCalculator { // Just use 'dominant' resource return (dominant) ? 
Math.max( - (float)resource.getMemory() / clusterResource.getMemory(), + (float)resource.getMemorySize() / clusterResource.getMemorySize(), (float)resource.getVirtualCores() / clusterResource.getVirtualCores() ) : Math.min( - (float)resource.getMemory() / clusterResource.getMemory(), + (float)resource.getMemorySize() / clusterResource.getMemorySize(), (float)resource.getVirtualCores() / clusterResource.getVirtualCores() ); } @Override - public int computeAvailableContainers(Resource available, Resource required) { + public long computeAvailableContainers(Resource available, Resource required) { return Math.min( - available.getMemory() / required.getMemory(), + available.getMemorySize() / required.getMemorySize(), available.getVirtualCores() / required.getVirtualCores()); } @@ -127,7 +127,7 @@ public class DominantResourceCalculator extends ResourceCalculator { @Override public boolean isInvalidDivisor(Resource r) { - if (r.getMemory() == 0.0f || r.getVirtualCores() == 0.0f) { + if (r.getMemorySize() == 0.0f || r.getVirtualCores() == 0.0f) { return true; } return false; @@ -136,15 +136,15 @@ public class DominantResourceCalculator extends ResourceCalculator { @Override public float ratio(Resource a, Resource b) { return Math.max( - (float)a.getMemory()/b.getMemory(), + (float)a.getMemorySize()/b.getMemorySize(), (float)a.getVirtualCores()/b.getVirtualCores() ); } @Override - public Resource divideAndCeil(Resource numerator, int denominator) { + public Resource divideAndCeil(Resource numerator, long denominator) { return Resources.createResource( - divideAndCeil(numerator.getMemory(), denominator), + divideAndCeil(numerator.getMemorySize(), denominator), divideAndCeil(numerator.getVirtualCores(), denominator) ); } @@ -152,12 +152,12 @@ public class DominantResourceCalculator extends ResourceCalculator { @Override public Resource normalize(Resource r, Resource minimumResource, Resource maximumResource, Resource stepFactor) { - int normalizedMemory = Math.min( + long normalizedMemory = Math.min( roundUp( - Math.max(r.getMemory(), minimumResource.getMemory()), - stepFactor.getMemory()), - maximumResource.getMemory()); - int normalizedCores = Math.min( + Math.max(r.getMemorySize(), minimumResource.getMemorySize()), + stepFactor.getMemorySize()), + maximumResource.getMemorySize()); + long normalizedCores = Math.min( roundUp( Math.max(r.getVirtualCores(), minimumResource.getVirtualCores()), stepFactor.getVirtualCores()), @@ -169,7 +169,7 @@ public class DominantResourceCalculator extends ResourceCalculator { @Override public Resource roundUp(Resource r, Resource stepFactor) { return Resources.createResource( - roundUp(r.getMemory(), stepFactor.getMemory()), + roundUp(r.getMemorySize(), stepFactor.getMemorySize()), roundUp(r.getVirtualCores(), stepFactor.getVirtualCores()) ); } @@ -177,7 +177,7 @@ public class DominantResourceCalculator extends ResourceCalculator { @Override public Resource roundDown(Resource r, Resource stepFactor) { return Resources.createResource( - roundDown(r.getMemory(), stepFactor.getMemory()), + roundDown(r.getMemorySize(), stepFactor.getMemorySize()), roundDown(r.getVirtualCores(), stepFactor.getVirtualCores()) ); } @@ -187,7 +187,7 @@ public class DominantResourceCalculator extends ResourceCalculator { Resource stepFactor) { return Resources.createResource( roundUp( - (int)Math.ceil(r.getMemory() * by), stepFactor.getMemory()), + (int)Math.ceil(r.getMemorySize() * by), stepFactor.getMemorySize()), roundUp( (int)Math.ceil(r.getVirtualCores() * by), 
stepFactor.getVirtualCores()) @@ -199,8 +199,8 @@ public class DominantResourceCalculator extends ResourceCalculator { Resource stepFactor) { return Resources.createResource( roundDown( - (int)(r.getMemory() * by), - stepFactor.getMemory() + (long)(r.getMemorySize() * by), + stepFactor.getMemorySize() ), roundDown( (int)(r.getVirtualCores() * by), @@ -212,7 +212,7 @@ public class DominantResourceCalculator extends ResourceCalculator { @Override public boolean fitsIn(Resource cluster, Resource smaller, Resource bigger) { - return smaller.getMemory() <= bigger.getMemory() + return smaller.getMemorySize() <= bigger.getMemorySize() && smaller.getVirtualCores() <= bigger.getVirtualCores(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java index 3a312251fe4..3fe8adecb8b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/ResourceCalculator.java @@ -31,18 +31,18 @@ public abstract class ResourceCalculator { public abstract int compare(Resource clusterResource, Resource lhs, Resource rhs); - public static int divideAndCeil(int a, int b) { + public static long divideAndCeil(long a, long b) { if (b == 0) { return 0; } return (a + (b - 1)) / b; } - public static int roundUp(int a, int b) { + public static long roundUp(long a, long b) { return divideAndCeil(a, b) * b; } - public static int roundDown(int a, int b) { + public static long roundDown(long a, long b) { return (a / b) * b; } @@ -54,7 +54,7 @@ public abstract class ResourceCalculator { * @param required required resources * @return number of containers which can be allocated */ - public abstract int computeAvailableContainers( + public abstract long computeAvailableContainers( Resource available, Resource required); /** @@ -169,7 +169,7 @@ public abstract class ResourceCalculator { * @param denominator denominator * @return resultant resource */ - public abstract Resource divideAndCeil(Resource numerator, int denominator); + public abstract Resource divideAndCeil(Resource numerator, long denominator); /** * Check if a smaller resource can be contained by bigger resource. 
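// Illustrative sketch, not part of the patch: the static helpers above now do their
// step-factor rounding in long arithmetic, so memory sizes beyond Integer.MAX_VALUE MB
// survive the math. The class below simply mirrors divideAndCeil/roundUp/roundDown as
// shown in ResourceCalculator; the values in main() are invented for the demonstration.
public class RoundingSketch {
  static long divideAndCeil(long a, long b) {
    if (b == 0) {
      return 0;
    }
    return (a + (b - 1)) / b;
  }

  static long roundUp(long a, long b) {
    return divideAndCeil(a, b) * b;
  }

  static long roundDown(long a, long b) {
    return (a / b) * b;
  }

  public static void main(String[] args) {
    long hugeMemoryMb = 3L * 1024 * 1024 * 1024; // ~3.2 billion MB, does not fit in an int
    long stepMb = 1024;
    System.out.println(roundUp(hugeMemoryMb, stepMb));           // 3221225472
    System.out.println(roundDown(hugeMemoryMb, stepMb));         // 3221225472
    System.out.println(divideAndCeil(hugeMemoryMb + 1, stepMb)); // 3145729
  }
}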
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java index 558f96c7446..e944e1174d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/resource/Resources.java @@ -31,12 +31,18 @@ public class Resources { private static final Resource NONE = new Resource() { @Override + @SuppressWarnings("deprecation") public int getMemory() { return 0; } @Override - public void setMemory(int memory) { + public long getMemorySize() { + return 0; + } + + @Override + public void setMemory(long memory) { throw new RuntimeException("NONE cannot be modified!"); } @@ -46,17 +52,22 @@ public class Resources { } @Override - public void setVirtualCores(int cores) { + public long getVirtualCoresSize() { + return 0; + } + + @Override + public void setVirtualCores(long cores) { throw new RuntimeException("NONE cannot be modified!"); } @Override public int compareTo(Resource o) { - int diff = 0 - o.getMemory(); + long diff = 0 - o.getMemorySize(); if (diff == 0) { diff = 0 - o.getVirtualCores(); } - return diff; + return Long.signum(diff); } }; @@ -64,12 +75,18 @@ public class Resources { private static final Resource UNBOUNDED = new Resource() { @Override + @SuppressWarnings("deprecation") public int getMemory() { return Integer.MAX_VALUE; } @Override - public void setMemory(int memory) { + public long getMemorySize() { + return Long.MAX_VALUE; + } + + @Override + public void setMemory(long memory) { throw new RuntimeException("UNBOUNDED cannot be modified!"); } @@ -79,26 +96,31 @@ public class Resources { } @Override - public void setVirtualCores(int cores) { + public long getVirtualCoresSize() { + return Long.MAX_VALUE; + } + + @Override + public void setVirtualCores(long cores) { throw new RuntimeException("UNBOUNDED cannot be modified!"); } @Override public int compareTo(Resource o) { - int diff = Integer.MAX_VALUE - o.getMemory(); + long diff = Long.MAX_VALUE - o.getMemorySize(); if (diff == 0) { - diff = Integer.MAX_VALUE - o.getVirtualCores(); + diff = Long.MAX_VALUE - o.getVirtualCoresSize(); } - return diff; + return Long.signum(diff); } }; - public static Resource createResource(int memory) { + public static Resource createResource(long memory) { return createResource(memory, (memory > 0) ? 
1 : 0); } - public static Resource createResource(int memory, int cores) { + public static Resource createResource(long memory, long cores) { Resource resource = Records.newRecord(Resource.class); resource.setMemory(memory); resource.setVirtualCores(cores); @@ -114,11 +136,11 @@ public class Resources { } public static Resource clone(Resource res) { - return createResource(res.getMemory(), res.getVirtualCores()); + return createResource(res.getMemorySize(), res.getVirtualCores()); } public static Resource addTo(Resource lhs, Resource rhs) { - lhs.setMemory(lhs.getMemory() + rhs.getMemory()); + lhs.setMemory(lhs.getMemorySize() + rhs.getMemorySize()); lhs.setVirtualCores(lhs.getVirtualCores() + rhs.getVirtualCores()); return lhs; } @@ -128,7 +150,7 @@ public class Resources { } public static Resource subtractFrom(Resource lhs, Resource rhs) { - lhs.setMemory(lhs.getMemory() - rhs.getMemory()); + lhs.setMemory(lhs.getMemorySize() - rhs.getMemorySize()); lhs.setVirtualCores(lhs.getVirtualCores() - rhs.getVirtualCores()); return lhs; } @@ -142,7 +164,7 @@ public class Resources { } public static Resource multiplyTo(Resource lhs, double by) { - lhs.setMemory((int)(lhs.getMemory() * by)); + lhs.setMemory((long)(lhs.getMemorySize() * by)); lhs.setVirtualCores((int)(lhs.getVirtualCores() * by)); return lhs; } @@ -157,7 +179,7 @@ public class Resources { */ public static Resource multiplyAndAddTo( Resource lhs, Resource rhs, double by) { - lhs.setMemory(lhs.getMemory() + (int)(rhs.getMemory() * by)); + lhs.setMemory(lhs.getMemorySize() + (long)(rhs.getMemorySize() * by)); lhs.setVirtualCores(lhs.getVirtualCores() + (int)(rhs.getVirtualCores() * by)); return lhs; @@ -175,7 +197,7 @@ public class Resources { public static Resource multiplyAndRoundDown(Resource lhs, double by) { Resource out = clone(lhs); - out.setMemory((int)(lhs.getMemory() * by)); + out.setMemory((long)(lhs.getMemorySize() * by)); out.setVirtualCores((int)(lhs.getVirtualCores() * by)); return out; } @@ -264,7 +286,7 @@ public class Resources { } public static boolean fitsIn(Resource smaller, Resource bigger) { - return smaller.getMemory() <= bigger.getMemory() && + return smaller.getMemorySize() <= bigger.getMemorySize() && smaller.getVirtualCores() <= bigger.getVirtualCores(); } @@ -274,12 +296,12 @@ public class Resources { } public static Resource componentwiseMin(Resource lhs, Resource rhs) { - return createResource(Math.min(lhs.getMemory(), rhs.getMemory()), + return createResource(Math.min(lhs.getMemorySize(), rhs.getMemorySize()), Math.min(lhs.getVirtualCores(), rhs.getVirtualCores())); } public static Resource componentwiseMax(Resource lhs, Resource rhs) { - return createResource(Math.max(lhs.getMemory(), rhs.getMemory()), + return createResource(Math.max(lhs.getMemorySize(), rhs.getMemorySize()), Math.max(lhs.getVirtualCores(), rhs.getVirtualCores())); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java index d38ffdc5a53..7396adc2a32 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/util/resource/TestResources.java @@ -24,18 +24,18 @@ import static org.junit.Assert.assertTrue; public class TestResources { - public Resource createResource(int 
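// Hedged illustration, not taken from the patch: the multiply helpers above must keep the
// memory term in long once getMemorySize() can exceed Integer.MAX_VALUE; an int cast of the
// scaled value would clamp it. The numbers below are invented, and plain longs stand in for
// Resource objects to keep the snippet self-contained.
public class MultiplySketch {
  public static void main(String[] args) {
    long usedMb = 5L * 1024 * 1024 * 1024; // ~5.4 billion MB, larger than Integer.MAX_VALUE
    double by = 0.5;

    long scaledAsLong = (long) (usedMb * by); // 2684354560, the intended result
    int scaledAsInt = (int) (usedMb * by);    // clamps to Integer.MAX_VALUE (2147483647)

    System.out.println(scaledAsLong);
    System.out.println(scaledAsInt);
  }
}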
memory, int vCores) { + public Resource createResource(long memory, long vCores) { return Resource.newInstance(memory, vCores); } @Test(timeout=1000) public void testCompareToWithUnboundedResource() { assertTrue(Resources.unbounded().compareTo( - createResource(Integer.MAX_VALUE, Integer.MAX_VALUE)) == 0); + createResource(Long.MAX_VALUE, Long.MAX_VALUE)) == 0); assertTrue(Resources.unbounded().compareTo( - createResource(Integer.MAX_VALUE, 0)) > 0); + createResource(Long.MAX_VALUE, 0)) > 0); assertTrue(Resources.unbounded().compareTo( - createResource(0, Integer.MAX_VALUE)) > 0); + createResource(0, Long.MAX_VALUE)) > 0); } @Test(timeout=1000) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index b97f935aadb..22e45fa3d37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -442,7 +442,7 @@ public class BuilderUtils { return report; } - public static Resource newResource(int memory, int vCores) { + public static Resource newResource(long memory, long vCores) { Resource resource = recordFactory.newRecordInstance(Resource.class); resource.setMemory(memory); resource.setVirtualCores(vCores); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java index cad3b2e3ac8..ff4519da388 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/AppInfo.java @@ -59,8 +59,8 @@ public class AppInfo { protected long elapsedTime; protected String applicationTags; protected int priority; - private int allocatedCpuVcores; - private int allocatedMemoryMB; + private long allocatedCpuVcores; + private long allocatedMemoryMB; protected boolean unmanagedApplication; private String appNodeLabelExpression; private String amNodeLabelExpression; @@ -100,7 +100,7 @@ public class AppInfo { allocatedCpuVcores = app.getApplicationResourceUsageReport() .getUsedResources().getVirtualCores(); allocatedMemoryMB = app.getApplicationResourceUsageReport() - .getUsedResources().getMemory(); + .getUsedResources().getMemorySize(); } } progress = app.getProgress() * 100; // in percent @@ -152,11 +152,11 @@ public class AppInfo { return runningContainers; } - public int getAllocatedCpuVcores() { + public long getAllocatedCpuVcores() { return allocatedCpuVcores; } - public int getAllocatedMemoryMB() { + public long getAllocatedMemoryMB() { return allocatedMemoryMB; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java index f127f9cda5c..1a5ee85cf89 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/webapp/dao/ContainerInfo.java @@ -36,8 +36,8 @@ import org.apache.hadoop.yarn.util.Times; public class ContainerInfo { protected String containerId; - protected int allocatedMB; - protected int allocatedVCores; + protected long allocatedMB; + protected long allocatedVCores; protected String assignedNodeId; protected int priority; protected long startedTime; @@ -57,7 +57,7 @@ public class ContainerInfo { public ContainerInfo(ContainerReport container) { containerId = container.getContainerId().toString(); if (container.getAllocatedResource() != null) { - allocatedMB = container.getAllocatedResource().getMemory(); + allocatedMB = container.getAllocatedResource().getMemorySize(); allocatedVCores = container.getAllocatedResource().getVirtualCores(); } if (container.getAssignedNode() != null) { @@ -79,11 +79,11 @@ public class ContainerInfo { return containerId; } - public int getAllocatedMB() { + public long getAllocatedMB() { return allocatedMB; } - public int getAllocatedVCores() { + public long getAllocatedVCores() { return allocatedVCores; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java index c9427ddabc5..e25547d0d57 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/test/java/org/apache/hadoop/yarn/TestYarnServerApiClasses.java @@ -180,7 +180,7 @@ public class TestYarnServerApiClasses { assertEquals(1, copy.getContainersToDecrease().get(0) .getId().getContainerId()); assertEquals(1024, copy.getContainersToDecrease().get(1) - .getResource().getMemory()); + .getResource().getMemorySize()); } /** @@ -201,7 +201,7 @@ public class TestYarnServerApiClasses { assertEquals(8080, copy.getHttpPort()); assertEquals(9090, copy.getNodeId().getPort()); - assertEquals(10000, copy.getResource().getMemory()); + assertEquals(10000, copy.getResource().getMemorySize()); assertEquals(2, copy.getResource().getVirtualCores()); } @@ -273,7 +273,7 @@ public class TestYarnServerApiClasses { assertEquals(1, copy.getIncreasedContainers().get(0) .getId().getContainerId()); assertEquals(4096, copy.getIncreasedContainers().get(1) - .getResource().getMemory()); + .getResource().getMemorySize()); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java index d08ee67311a..98171af2768 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/ContainerExecutor.java @@ -405,7 +405,7 @@ public abstract class ContainerExecutor implements Configurable { .getBoolean( YarnConfiguration.NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED, YarnConfiguration.DEFAULT_NM_WINDOWS_CONTAINER_MEMORY_LIMIT_ENABLED)) { - memory = resource.getMemory(); + memory = (int) resource.getMemorySize(); } if (conf.getBoolean( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 2ff8e7ccf38..304488eab95 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -215,7 +215,7 @@ public class ContainerImpl implements Container { if (recoveredCapability != null && !this.resource.equals(recoveredCapability)) { // resource capability had been updated before NM was down - this.resource = Resource.newInstance(recoveredCapability.getMemory(), + this.resource = Resource.newInstance(recoveredCapability.getMemorySize(), recoveredCapability.getVirtualCores()); } this.remainingRetryAttempts = rcs.getRemainingRetryAttempts(); @@ -611,7 +611,7 @@ public class ContainerImpl implements Container { long launchDuration = clock.getTime() - containerLaunchStartTime; metrics.addContainerLaunchDuration(launchDuration); - long pmemBytes = getResource().getMemory() * 1024 * 1024L; + long pmemBytes = getResource().getMemorySize() * 1024 * 1024L; float pmemRatio = daemonConf.getFloat( YarnConfiguration.NM_VMEM_PMEM_RATIO, YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java index 78113e5e8fe..4630c1bd87d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/resources/CGroupsMemoryResourceHandlerImpl.java @@ -115,8 +115,8 @@ public class CGroupsMemoryResourceHandlerImpl implements MemoryResourceHandler { String cgroupId = container.getContainerId().toString(); //memory is in MB long containerSoftLimit = - (long) (container.getResource().getMemory() * this.softLimit); - long 
containerHardLimit = container.getResource().getMemory(); + (long) (container.getResource().getMemorySize() * this.softLimit); + long containerHardLimit = container.getResource().getMemorySize(); cGroupsHandler.createCGroup(MEMORY, cgroupId); try { cGroupsHandler.updateCGroupParam(MEMORY, cgroupId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java index 8ca744d9298..3fbae102550 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/ContainersMonitorImpl.java @@ -646,7 +646,7 @@ public class ContainersMonitorImpl extends AbstractService implements ChangeMonitoringContainerResourceEvent changeEvent = (ChangeMonitoringContainerResourceEvent) monitoringEvent; Resource resource = changeEvent.getResource(); - pmemLimitMBs = resource.getMemory(); + pmemLimitMBs = (int) resource.getMemorySize(); vmemLimitMBs = (int) (pmemLimitMBs * vmemRatio); cpuVcores = resource.getVirtualCores(); usageMetrics.recordResourceLimit( @@ -822,7 +822,7 @@ public class ContainersMonitorImpl extends AbstractService implements } LOG.info("Changing resource-monitoring for " + containerId); updateContainerMetrics(monitoringEvent); - long pmemLimit = changeEvent.getResource().getMemory() * 1024L * 1024L; + long pmemLimit = changeEvent.getResource().getMemorySize() * 1024L * 1024L; long vmemLimit = (long) (pmemLimit * vmemRatio); int cpuVcores = changeEvent.getResource().getVirtualCores(); processTreeInfo.setResourceLimit(pmemLimit, vmemLimit, cpuVcores); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java index a750d936745..1ce3356748b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/queuing/QueuingContainerManagerImpl.java @@ -600,7 +600,7 @@ public class QueuingContainerManagerImpl extends ContainerManagerImpl { private ProcessTreeInfo createProcessTreeInfo(ContainerId containerId, Resource resource, Configuration conf) { - long pmemBytes = resource.getMemory() * 1024 * 1024L; + long pmemBytes = resource.getMemorySize() * 1024 * 1024L; float pmemRatio = conf.getFloat(YarnConfiguration.NM_VMEM_PMEM_RATIO, YarnConfiguration.DEFAULT_NM_VMEM_PMEM_RATIO); long vmemBytes = (long) (pmemRatio * pmemBytes); diff --git 
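// Illustrative only: the physical/virtual memory limits computed in ContainersMonitorImpl and
// QueuingContainerManagerImpl above boil down to the arithmetic below. The 2.1 ratio matches
// the usual yarn.nodemanager.vmem-pmem-ratio default, and the container size is invented.
public class MemoryLimitSketch {
  public static void main(String[] args) {
    long containerMb = 4096;  // stands in for resource.getMemorySize()
    float vmemRatio = 2.1f;   // stands in for the configured NM_VMEM_PMEM_RATIO

    long pmemBytes = containerMb * 1024 * 1024L;     // MB -> bytes, kept in long
    long vmemBytes = (long) (vmemRatio * pmemBytes); // virtual memory allowance

    System.out.println(pmemBytes); // 4294967296 (4 GB)
    System.out.println(vmemBytes); // roughly 2.1 * pmemBytes
  }
}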
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java index a38d0b71435..11e86849be3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/metrics/NodeManagerMetrics.java @@ -115,9 +115,9 @@ public class NodeManagerMetrics { public void allocateContainer(Resource res) { allocatedContainers.incr(); - allocatedMB = allocatedMB + res.getMemory(); + allocatedMB = allocatedMB + res.getMemorySize(); allocatedGB.set((int)Math.ceil(allocatedMB/1024d)); - availableMB = availableMB - res.getMemory(); + availableMB = availableMB - res.getMemorySize(); availableGB.set((int)Math.floor(availableMB/1024d)); allocatedVCores.incr(res.getVirtualCores()); availableVCores.decr(res.getVirtualCores()); @@ -125,16 +125,16 @@ public class NodeManagerMetrics { public void releaseContainer(Resource res) { allocatedContainers.decr(); - allocatedMB = allocatedMB - res.getMemory(); + allocatedMB = allocatedMB - res.getMemorySize(); allocatedGB.set((int)Math.ceil(allocatedMB/1024d)); - availableMB = availableMB + res.getMemory(); + availableMB = availableMB + res.getMemorySize(); availableGB.set((int)Math.floor(availableMB/1024d)); allocatedVCores.decr(res.getVirtualCores()); availableVCores.incr(res.getVirtualCores()); } public void changeContainer(Resource before, Resource now) { - int deltaMB = now.getMemory() - before.getMemory(); + long deltaMB = now.getMemorySize() - before.getMemorySize(); int deltaVCores = now.getVirtualCores() - before.getVirtualCores(); allocatedMB = allocatedMB + deltaMB; allocatedGB.set((int)Math.ceil(allocatedMB/1024d)); @@ -145,7 +145,7 @@ public class NodeManagerMetrics { } public void addResource(Resource res) { - availableMB = availableMB + res.getMemory(); + availableMB = availableMB + res.getMemorySize(); availableGB.incr((int)Math.floor(availableMB/1024d)); availableVCores.incr(res.getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java index cf022b985ff..10a8156fd78 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/dao/ContainerInfo.java @@ -81,7 +81,7 @@ public class ContainerInfo { this.user = container.getUser(); Resource res = container.getResource(); if (res != null) { - this.totalMemoryNeededMB = res.getMemory(); + this.totalMemoryNeededMB = res.getMemorySize(); this.totalVCoresNeeded = res.getVirtualCores(); } this.containerLogsShortLink = ujoin("containerlogs", this.id, diff --git 
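// Sketch of the gauge bookkeeping in NodeManagerMetrics above, using plain fields in place of
// the metrics2 gauges; the starting capacity and container size are invented for the example.
public class NmMetricsSketch {
  static long allocatedMB = 0;
  static long availableMB = 8192;

  static void allocateContainer(long containerMb) {
    allocatedMB += containerMb;
    availableMB -= containerMb;
    int allocatedGB = (int) Math.ceil(allocatedMB / 1024d);  // allocated GB rounds up
    int availableGB = (int) Math.floor(availableMB / 1024d); // available GB rounds down
    System.out.println(allocatedGB + " GB allocated, " + availableGB + " GB available");
  }

  public static void main(String[] args) {
    allocateContainer(1536); // prints "2 GB allocated, 6 GB available"
  }
}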
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 689a32754ae..7975f23de6a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -190,7 +190,7 @@ public class TestNodeStatusUpdater { InetSocketAddress expected = NetUtils.getConnectAddress( conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1)); Assert.assertEquals(NetUtils.getHostPortString(expected), nodeId.toString()); - Assert.assertEquals(5 * 1024, resource.getMemory()); + Assert.assertEquals(5 * 1024, resource.getMemorySize()); registeredNodes.add(nodeId); RegisterNodeManagerResponse response = recordFactory @@ -918,7 +918,7 @@ public class TestNodeStatusUpdater { conf.getSocketAddr(YarnConfiguration.NM_ADDRESS, null, -1)); Assert.assertEquals(NetUtils.getHostPortString(expected), nodeId.toString()); - Assert.assertEquals(5 * 1024, resource.getMemory()); + Assert.assertEquals(5 * 1024, resource.getMemorySize()); registeredNodes.add(nodeId); RegisterNodeManagerResponse response = recordFactory diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java index 008f7ee2974..94145e40f61 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java @@ -228,7 +228,7 @@ public class TestContainersMonitor extends BaseContainerManagerTest { commands.add("/bin/bash"); commands.add(scriptFile.getAbsolutePath()); containerLaunchContext.setCommands(commands); - Resource r = BuilderUtils.newResource(8 * 1024 * 1024, 1); + Resource r = BuilderUtils.newResource(0, 0); ContainerTokenIdentifier containerIdentifier = new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user, r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java index ef4a0d47700..f6af0309889 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java @@ -94,9 +94,9 @@ public class RMNMInfo implements RMNMInfoBeans { ni.getNodeManagerVersion()); if(report != null) { info.put("NumContainers", report.getNumContainers()); - info.put("UsedMemoryMB", report.getUsedResource().getMemory()); + info.put("UsedMemoryMB", report.getUsedResource().getMemorySize()); info.put("AvailableMemoryMB", - report.getAvailableResource().getMemory()); + report.getAvailableResource().getMemorySize()); } nodesInfo.add(info); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java index 1318d5814be..95fdb0528b9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMServerUtils.java @@ -142,7 +142,7 @@ public class RMServerUtils { // example, you cannot request target resource of a <10G, 10> container to // <20G, 8> if (increase) { - if (originalResource.getMemory() > targetResource.getMemory() + if (originalResource.getMemorySize() > targetResource.getMemorySize() || originalResource.getVirtualCores() > targetResource .getVirtualCores()) { String msg = @@ -153,7 +153,7 @@ public class RMServerUtils { throw new InvalidResourceRequestException(msg); } } else { - if (originalResource.getMemory() < targetResource.getMemory() + if (originalResource.getMemorySize() < targetResource.getMemorySize() || originalResource.getVirtualCores() < targetResource .getVirtualCores()) { String msg = @@ -243,15 +243,15 @@ public class RMServerUtils { return; } for (ContainerResourceChangeRequest request : requests) { - if (request.getCapability().getMemory() < 0 - || request.getCapability().getMemory() > maximumAllocation - .getMemory()) { + if (request.getCapability().getMemorySize() < 0 + || request.getCapability().getMemorySize() > maximumAllocation + .getMemorySize()) { throw new InvalidResourceRequestException("Invalid " + (increase ? 
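// Condensed model, not from the patch, of the container-resize validation in RMServerUtils
// above; plain primitives stand in for Resource objects. The <10G, 10> to <20G, 8> case from
// the comment in that method is replayed in main().
public class ResizeValidationSketch {
  // An increase request may not shrink any dimension of the original resource.
  static boolean validIncrease(long origMb, int origVcores, long targetMb, int targetVcores) {
    return origMb <= targetMb && origVcores <= targetVcores;
  }

  // A decrease request may not grow any dimension of the original resource.
  static boolean validDecrease(long origMb, int origVcores, long targetMb, int targetVcores) {
    return origMb >= targetMb && origVcores >= targetVcores;
  }

  public static void main(String[] args) {
    System.out.println(validIncrease(10 * 1024, 10, 20 * 1024, 8)); // false: vcores would shrink
    System.out.println(validIncrease(1024, 1, 2048, 2));            // true
    System.out.println(validDecrease(2048, 2, 1024, 1));            // true
  }
}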
"increase" : "decrease") + " request" + ", requested memory < 0" + ", or requested memory > max configured" + ", requestedMemory=" - + request.getCapability().getMemory() + ", maxMemory=" - + maximumAllocation.getMemory()); + + request.getCapability().getMemorySize() + ", maxMemory=" + + maximumAllocation.getMemorySize()); } if (request.getCapability().getVirtualCores() < 0 || request.getCapability().getVirtualCores() > maximumAllocation diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java index cfb050a1111..38f2660a310 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ResourceTrackerService.java @@ -362,7 +362,7 @@ public class ResourceTrackerService extends AbstractService implements } // Check if this node has minimum allocations - if (capability.getMemory() < minAllocMb + if (capability.getMemorySize() < minAllocMb || capability.getVirtualCores() < minAllocVcores) { String message = "NodeManager from " + host diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java index 0e3802b5743..b0b976d91e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/SystemMetricsPublisher.java @@ -480,7 +480,7 @@ public class SystemMetricsPublisher extends CompositeService { TimelineEntity entity = createContainerEntity(event.getContainerId()); Map entityInfo = new HashMap(); entityInfo.put(ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO, - event.getAllocatedResource().getMemory()); + event.getAllocatedResource().getMemorySize()); entityInfo.put(ContainerMetricsConstants.ALLOCATED_VCORE_ENTITY_INFO, event.getAllocatedResource().getVirtualCores()); entityInfo.put(ContainerMetricsConstants.ALLOCATED_HOST_ENTITY_INFO, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java index 116cd22b721..04ed1358806 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TempQueuePerPartition.java @@ -258,17 +258,17 @@ public class TempQueuePerPartition { void appendLogString(StringBuilder sb) { sb.append(queueName).append(", ") - .append(current.getMemory()).append(", ") + .append(current.getMemorySize()).append(", ") .append(current.getVirtualCores()).append(", ") - .append(pending.getMemory()).append(", ") + .append(pending.getMemorySize()).append(", ") .append(pending.getVirtualCores()).append(", ") - .append(getGuaranteed().getMemory()).append(", ") + .append(getGuaranteed().getMemorySize()).append(", ") .append(getGuaranteed().getVirtualCores()).append(", ") - .append(idealAssigned.getMemory()).append(", ") + .append(idealAssigned.getMemorySize()).append(", ") .append(idealAssigned.getVirtualCores()).append(", ") - .append(toBePreempted.getMemory()).append(", ") + .append(toBePreempted.getMemorySize()).append(", ") .append(toBePreempted.getVirtualCores() ).append(", ") - .append(actuallyToBePreempted.getMemory()).append(", ") + .append(actuallyToBePreempted.getMemorySize()).append(", ") .append(actuallyToBePreempted.getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java index eaf29028598..90357e3504f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/AbstractSchedulerPlanFollower.java @@ -160,7 +160,7 @@ public abstract class AbstractSchedulerPlanFollower implements PlanFollower { } Resource capToAssign = res.getResourcesAtTime(now); float targetCapacity = 0f; - if (planResources.getMemory() > 0 + if (planResources.getMemorySize() > 0 && planResources.getVirtualCores() > 0) { if (shouldResize) { capToAssign = diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java index f8b68e32246..07bee998207 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/CapacityOverTimePolicy.java @@ -246,7 +246,7 @@ public class CapacityOverTimePolicy implements SharingPolicy { long vcores; public IntegralResource(Resource resource) { - this.memory = resource.getMemory(); + this.memory = resource.getMemorySize(); this.vcores = resource.getVirtualCores(); } @@ -256,12 +256,12 @@ public class 
CapacityOverTimePolicy implements SharingPolicy { } public void add(Resource r) { - memory += r.getMemory(); + memory += r.getMemorySize(); vcores += r.getVirtualCores(); } public void subtract(Resource r) { - memory -= r.getMemory(); + memory -= r.getMemorySize(); vcores -= r.getVirtualCores(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java index aba4822a496..81d32c185e0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemUtil.java @@ -106,7 +106,7 @@ public final class ReservationSystemUtil { public static ResourceProto convertToProtoFormat(Resource e) { return YarnProtos.ResourceProto.newBuilder() - .setMemory(e.getMemory()) + .setMemory(e.getMemorySize()) .setVirtualCores(e.getVirtualCores()) .build(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java index 5a46a4e43a8..43d6584c10f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/StageEarliestStartByDemand.java @@ -88,7 +88,7 @@ public class StageEarliestStartByDemand implements StageEarliestStart { // Weight = total memory consumption of stage protected double calcWeight(ReservationRequest stage) { - return (stage.getDuration() * stage.getCapability().getMemory()) + return (stage.getDuration() * stage.getCapability().getMemorySize()) * (stage.getNumContainers()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java index 973a7dba945..512149385d5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmcontainer/RMContainerImpl.java @@ -712,7 +712,7 @@ public class RMContainerImpl implements RMContainer, Comparable { } long usedMillis = container.finishTime - container.creationTime; - long 
memorySeconds = resource.getMemory() + long memorySeconds = resource.getMemorySize() * usedMillis / DateUtils.MILLIS_PER_SECOND; long vcoreSeconds = resource.getVirtualCores() * usedMillis / DateUtils.MILLIS_PER_SECOND; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java index 34b4267c758..feb071fca83 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/ClusterNodeTracker.java @@ -56,8 +56,8 @@ public class ClusterNodeTracker { private Resource staleClusterCapacity = null; // Max allocation - private int maxNodeMemory = -1; - private int maxNodeVCores = -1; + private long maxNodeMemory = -1; + private long maxNodeVCores = -1; private Resource configuredMaxAllocation; private boolean forceConfiguredMaxAllocation = true; private long configuredMaxAllocationWaitTime; @@ -211,7 +211,7 @@ public class ClusterNodeTracker { } return Resources.createResource( - Math.min(configuredMaxAllocation.getMemory(), maxNodeMemory), + Math.min(configuredMaxAllocation.getMemorySize(), maxNodeMemory), Math.min(configuredMaxAllocation.getVirtualCores(), maxNodeVCores) ); } finally { @@ -224,7 +224,7 @@ public class ClusterNodeTracker { writeLock.lock(); try { if (add) { // added node - int nodeMemory = totalResource.getMemory(); + long nodeMemory = totalResource.getMemorySize(); if (nodeMemory > maxNodeMemory) { maxNodeMemory = nodeMemory; } @@ -233,7 +233,7 @@ public class ClusterNodeTracker { maxNodeVCores = nodeVCores; } } else { // removed node - if (maxNodeMemory == totalResource.getMemory()) { + if (maxNodeMemory == totalResource.getMemorySize()) { maxNodeMemory = -1; } if (maxNodeVCores == totalResource.getVirtualCores()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java index d9c7283675e..69c3e8b8773 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/QueueMetrics.java @@ -38,6 +38,7 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry; import org.apache.hadoop.metrics2.lib.MutableCounterInt; import org.apache.hadoop.metrics2.lib.MutableCounterLong; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.metrics2.lib.MutableRate; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Resource; @@ -59,8 +60,8 @@ public class QueueMetrics implements MetricsSource { 
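// Illustrative arithmetic only: the memory-seconds / vcore-seconds accounting in
// RMContainerImpl above (and in SchedulerApplicationAttempt further down) reduces to the
// computation below; the container numbers are invented and 1000L stands in for
// DateUtils.MILLIS_PER_SECOND.
public class UsageAccountingSketch {
  public static void main(String[] args) {
    long memoryMb = 2048;     // container.getResource().getMemorySize()
    int vcores = 2;           // container.getResource().getVirtualCores()
    long usedMillis = 90_000; // finishTime - creationTime

    long memorySeconds = memoryMb * usedMillis / 1000L; // 184320 MB-seconds
    long vcoreSeconds = vcores * usedMillis / 1000L;    // 180 vcore-seconds

    System.out.println(memorySeconds + " " + vcoreSeconds);
  }
}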
@Metric("# of apps killed") MutableCounterInt appsKilled; @Metric("# of apps failed") MutableCounterInt appsFailed; - @Metric("Allocated memory in MB") MutableGaugeInt allocatedMB; - @Metric("Allocated CPU in virtual cores") MutableGaugeInt allocatedVCores; + @Metric("Allocated memory in MB") MutableGaugeLong allocatedMB; + @Metric("Allocated CPU in virtual cores") MutableGaugeLong allocatedVCores; @Metric("# of allocated containers") MutableGaugeInt allocatedContainers; @Metric("Aggregate # of allocated containers") MutableCounterLong aggregateContainersAllocated; @Metric("Aggregate # of allocated node-local containers") @@ -70,13 +71,13 @@ public class QueueMetrics implements MetricsSource { @Metric("Aggregate # of allocated off-switch containers") MutableCounterLong aggregateOffSwitchContainersAllocated; @Metric("Aggregate # of released containers") MutableCounterLong aggregateContainersReleased; - @Metric("Available memory in MB") MutableGaugeInt availableMB; - @Metric("Available CPU in virtual cores") MutableGaugeInt availableVCores; - @Metric("Pending memory allocation in MB") MutableGaugeInt pendingMB; - @Metric("Pending CPU allocation in virtual cores") MutableGaugeInt pendingVCores; + @Metric("Available memory in MB") MutableGaugeLong availableMB; + @Metric("Available CPU in virtual cores") MutableGaugeLong availableVCores; + @Metric("Pending memory allocation in MB") MutableGaugeLong pendingMB; + @Metric("Pending CPU allocation in virtual cores") MutableGaugeLong pendingVCores; @Metric("# of pending containers") MutableGaugeInt pendingContainers; - @Metric("# of reserved memory in MB") MutableGaugeInt reservedMB; - @Metric("Reserved CPU in virtual cores") MutableGaugeInt reservedVCores; + @Metric("# of reserved memory in MB") MutableGaugeLong reservedMB; + @Metric("Reserved CPU in virtual cores") MutableGaugeLong reservedVCores; @Metric("# of reserved containers") MutableGaugeInt reservedContainers; @Metric("# of active users") MutableGaugeInt activeUsers; @Metric("# of active applications") MutableGaugeInt activeApplications; @@ -325,7 +326,7 @@ public class QueueMetrics implements MetricsSource { * @param limit resource limit */ public void setAvailableResourcesToQueue(Resource limit) { - availableMB.set(limit.getMemory()); + availableMB.set(limit.getMemorySize()); availableVCores.set(limit.getVirtualCores()); } @@ -362,8 +363,8 @@ public class QueueMetrics implements MetricsSource { private void _incrPendingResources(int containers, Resource res) { pendingContainers.incr(containers); - pendingMB.incr(res.getMemory() * containers); - pendingVCores.incr(res.getVirtualCores() * containers); + pendingMB.incr(res.getMemorySize() * containers); + pendingVCores.incr(res.getVirtualCoresSize() * containers); } public void decrPendingResources(String user, int containers, Resource res) { @@ -379,8 +380,8 @@ public class QueueMetrics implements MetricsSource { private void _decrPendingResources(int containers, Resource res) { pendingContainers.decr(containers); - pendingMB.decr(res.getMemory() * containers); - pendingVCores.decr(res.getVirtualCores() * containers); + pendingMB.decr(res.getMemorySize() * containers); + pendingVCores.decr(res.getVirtualCoresSize() * containers); } public void incrNodeTypeAggregations(String user, NodeType type) { @@ -407,8 +408,8 @@ public class QueueMetrics implements MetricsSource { allocatedContainers.incr(containers); aggregateContainersAllocated.incr(containers); - allocatedMB.incr(res.getMemory() * containers); - 
allocatedVCores.incr(res.getVirtualCores() * containers); + allocatedMB.incr(res.getMemorySize() * containers); + allocatedVCores.incr(res.getVirtualCoresSize() * containers); if (decrPending) { _decrPendingResources(containers, res); } @@ -428,10 +429,10 @@ public class QueueMetrics implements MetricsSource { * @param res */ public void allocateResources(String user, Resource res) { - allocatedMB.incr(res.getMemory()); + allocatedMB.incr(res.getMemorySize()); allocatedVCores.incr(res.getVirtualCores()); - pendingMB.decr(res.getMemory()); + pendingMB.decr(res.getMemorySize()); pendingVCores.decr(res.getVirtualCores()); QueueMetrics userMetrics = getUserMetrics(user); @@ -446,8 +447,8 @@ public class QueueMetrics implements MetricsSource { public void releaseResources(String user, int containers, Resource res) { allocatedContainers.decr(containers); aggregateContainersReleased.incr(containers); - allocatedMB.decr(res.getMemory() * containers); - allocatedVCores.decr(res.getVirtualCores() * containers); + allocatedMB.decr(res.getMemorySize() * containers); + allocatedVCores.decr(res.getVirtualCoresSize() * containers); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { userMetrics.releaseResources(user, containers, res); @@ -464,7 +465,7 @@ public class QueueMetrics implements MetricsSource { * @param res */ public void releaseResources(String user, Resource res) { - allocatedMB.decr(res.getMemory()); + allocatedMB.decr(res.getMemorySize()); allocatedVCores.decr(res.getVirtualCores()); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { @@ -477,7 +478,7 @@ public class QueueMetrics implements MetricsSource { public void reserveResource(String user, Resource res) { reservedContainers.incr(); - reservedMB.incr(res.getMemory()); + reservedMB.incr(res.getMemorySize()); reservedVCores.incr(res.getVirtualCores()); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { @@ -490,7 +491,7 @@ public class QueueMetrics implements MetricsSource { public void unreserveResource(String user, Resource res) { reservedContainers.decr(); - reservedMB.decr(res.getMemory()); + reservedMB.decr(res.getMemorySize()); reservedVCores.decr(res.getVirtualCores()); QueueMetrics userMetrics = getUserMetrics(user); if (userMetrics != null) { @@ -563,11 +564,11 @@ public class QueueMetrics implements MetricsSource { return BuilderUtils.newResource(allocatedMB.value(), allocatedVCores.value()); } - public int getAllocatedMB() { + public long getAllocatedMB() { return allocatedMB.value(); } - public int getAllocatedVirtualCores() { + public long getAllocatedVirtualCores() { return allocatedVCores.value(); } @@ -575,19 +576,19 @@ public class QueueMetrics implements MetricsSource { return allocatedContainers.value(); } - public int getAvailableMB() { + public long getAvailableMB() { return availableMB.value(); } - public int getAvailableVirtualCores() { + public long getAvailableVirtualCores() { return availableVCores.value(); } - public int getPendingMB() { + public long getPendingMB() { return pendingMB.value(); } - public int getPendingVirtualCores() { + public long getPendingVirtualCores() { return pendingVCores.value(); } @@ -595,11 +596,11 @@ public class QueueMetrics implements MetricsSource { return pendingContainers.value(); } - public int getReservedMB() { + public long getReservedMB() { return reservedMB.value(); } - public int getReservedVirtualCores() { + public long getReservedVirtualCores() { return reservedVCores.value(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java index 7308e22ce72..ffb865786bb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplicationAttempt.java @@ -444,7 +444,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { */ public synchronized Resource getHeadroom() { // Corner case to deal with applications being slightly over-limit - if (resourceLimit.getMemory() < 0) { + if (resourceLimit.getMemorySize() < 0) { resourceLimit.setMemory(0); } @@ -480,7 +480,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { if (requests != null) { LOG.debug("showRequests:" + " application=" + getApplicationId() + " headRoom=" + getHeadroom() + " currentConsumption=" - + attemptResourceUsage.getUsed().getMemory()); + + attemptResourceUsage.getUsed().getMemorySize()); for (ResourceRequest request : requests.values()) { LOG.debug("showRequests:" + " application=" + getApplicationId() + " request=" + request); @@ -682,7 +682,7 @@ public class SchedulerApplicationAttempt implements SchedulableEntity { for (RMContainer rmContainer : this.liveContainers.values()) { long usedMillis = currentTimeMillis - rmContainer.getCreationTime(); Resource resource = rmContainer.getContainer().getResource(); - memorySeconds += resource.getMemory() * usedMillis / + memorySeconds += resource.getMemorySize() * usedMillis / DateUtils.MILLIS_PER_SECOND; vcoreSeconds += resource.getVirtualCores() * usedMillis / DateUtils.MILLIS_PER_SECOND; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index b4609640fe7..c999e26b19a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -274,13 +274,13 @@ public class SchedulerUtils { private static void validateResourceRequest(ResourceRequest resReq, Resource maximumResource, QueueInfo queueInfo, RMContext rmContext) throws InvalidResourceRequestException { - if (resReq.getCapability().getMemory() < 0 || - resReq.getCapability().getMemory() > maximumResource.getMemory()) { + if (resReq.getCapability().getMemorySize() < 0 || + resReq.getCapability().getMemorySize() > maximumResource.getMemorySize()) { throw new InvalidResourceRequestException("Invalid resource request" + ", requested memory < 0" + ", or requested memory > max configured" - + ", requestedMemory=" + 
resReq.getCapability().getMemory() - + ", maxMemory=" + maximumResource.getMemory()); + + ", requestedMemory=" + resReq.getCapability().getMemorySize() + + ", maxMemory=" + maximumResource.getMemorySize()); } if (resReq.getCapability().getVirtualCores() < 0 || resReq.getCapability().getVirtualCores() > diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java index 51d39ff6c45..58c398410af 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CSQueueMetrics.java @@ -23,7 +23,7 @@ import org.apache.hadoop.metrics2.MetricsSystem; import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; -import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; @@ -32,37 +32,37 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; public class CSQueueMetrics extends QueueMetrics { @Metric("AM memory limit in MB") - MutableGaugeInt AMResourceLimitMB; + MutableGaugeLong AMResourceLimitMB; @Metric("AM CPU limit in virtual cores") - MutableGaugeInt AMResourceLimitVCores; + MutableGaugeLong AMResourceLimitVCores; @Metric("Used AM memory limit in MB") - MutableGaugeInt usedAMResourceMB; + MutableGaugeLong usedAMResourceMB; @Metric("Used AM CPU limit in virtual cores") - MutableGaugeInt usedAMResourceVCores; + MutableGaugeLong usedAMResourceVCores; CSQueueMetrics(MetricsSystem ms, String queueName, Queue parent, boolean enableUserMetrics, Configuration conf) { super(ms, queueName, parent, enableUserMetrics, conf); } - public int getAMResourceLimitMB() { + public long getAMResourceLimitMB() { return AMResourceLimitMB.value(); } - public int getAMResourceLimitVCores() { + public long getAMResourceLimitVCores() { return AMResourceLimitVCores.value(); } - public int getUsedAMResourceMB() { + public long getUsedAMResourceMB() { return usedAMResourceMB.value(); } - public int getUsedAMResourceVCores() { + public long getUsedAMResourceVCores() { return usedAMResourceVCores.value(); } public void setAMResouceLimit(Resource res) { - AMResourceLimitMB.set(res.getMemory()); + AMResourceLimitMB.set(res.getMemorySize()); AMResourceLimitVCores.set(res.getVirtualCores()); } @@ -74,7 +74,7 @@ public class CSQueueMetrics extends QueueMetrics { } public void incAMUsed(String user, Resource res) { - usedAMResourceMB.incr(res.getMemory()); + usedAMResourceMB.incr(res.getMemorySize()); usedAMResourceVCores.incr(res.getVirtualCores()); CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user); if (userMetrics != null) { @@ -83,7 +83,7 @@ public class CSQueueMetrics extends QueueMetrics { } public void decAMUsed(String 
user, Resource res) { - usedAMResourceMB.decr(res.getMemory()); + usedAMResourceMB.decr(res.getMemorySize()); usedAMResourceVCores.decr(res.getVirtualCores()); CSQueueMetrics userMetrics = (CSQueueMetrics) getUserMetrics(user); if (userMetrics != null) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java index 95a12dc9399..49892f73528 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityHeadroomProvider.java @@ -65,7 +65,7 @@ public class CapacityHeadroomProvider { } } // Corner case to deal with applications being slightly over-limit - if (headroom.getMemory() < 0) { + if (headroom.getMemorySize() < 0) { headroom.setMemory(0); } return headroom; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java index d5bca66aff1..d5d1374d6c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacitySchedulerConfiguration.java @@ -677,7 +677,7 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur */ public Resource getMaximumAllocationPerQueue(String queue) { String queuePrefix = getQueuePrefix(queue); - int maxAllocationMbPerQueue = getInt(queuePrefix + MAXIMUM_ALLOCATION_MB, + long maxAllocationMbPerQueue = getInt(queuePrefix + MAXIMUM_ALLOCATION_MB, (int)UNDEFINED); int maxAllocationVcoresPerQueue = getInt( queuePrefix + MAXIMUM_ALLOCATION_VCORES, (int)UNDEFINED); @@ -690,7 +690,7 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur Resource clusterMax = getMaximumAllocation(); if (maxAllocationMbPerQueue == (int)UNDEFINED) { LOG.info("max alloc mb per queue for " + queue + " is undefined"); - maxAllocationMbPerQueue = clusterMax.getMemory(); + maxAllocationMbPerQueue = clusterMax.getMemorySize(); } if (maxAllocationVcoresPerQueue == (int)UNDEFINED) { LOG.info("max alloc vcore per queue for " + queue + " is undefined"); @@ -698,7 +698,7 @@ public class CapacitySchedulerConfiguration extends ReservationSchedulerConfigur } Resource result = Resources.createResource(maxAllocationMbPerQueue, maxAllocationVcoresPerQueue); - if (maxAllocationMbPerQueue > clusterMax.getMemory() + if (maxAllocationMbPerQueue > clusterMax.getMemorySize() || maxAllocationVcoresPerQueue > 
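
The CSQueueMetrics hunks above swap MutableGaugeInt for MutableGaugeLong and read memory through getMemorySize() instead of getMemory(). A minimal, self-contained sketch (not part of the patch) of the narrowing problem these widenings guard against once an aggregate memory figure in MB no longer fits in an int; the 4 PB figure is purely illustrative:

    public class MemoryWideningDemo {
      public static void main(String[] args) {
        // Aggregate memory expressed in MB, as the queue metrics store it.
        long fourPetabytesInMb = 4L * 1024 * 1024 * 1024;   // 4 PB = 2^32 MB

        // Narrowing to int silently wraps; 2^32 truncates to 0.
        int asInt = (int) fourPetabytesInMb;

        System.out.println(asInt);              // prints 0
        System.out.println(fourPetabytesInMb);  // prints 4294967296
      }
    }
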
clusterMax.getVirtualCores()) { throw new IllegalArgumentException( "Queue maximum allocation cannot be larger than the cluster setting" diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java index d562f342380..6dcafecb184 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/LeafQueue.java @@ -449,7 +449,7 @@ public class LeafQueue extends AbstractCSQueue { // since we have already told running AM's the size Resource oldMax = getMaximumAllocation(); Resource newMax = newlyParsedLeafQueue.getMaximumAllocation(); - if (newMax.getMemory() < oldMax.getMemory() + if (newMax.getMemorySize() < oldMax.getMemorySize() || newMax.getVirtualCores() < oldMax.getVirtualCores()) { throw new IOException( "Trying to reinitialize " diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java index a5ca2d868f2..b2d4bbe723b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/allocator/RegularContainerAllocator.java @@ -442,7 +442,7 @@ public class RegularContainerAllocator extends AbstractContainerAllocator { priority, capability); // Can we allocate a container on this node? 
- int availableContainers = + long availableContainers = rc.computeAvailableContainers(available, capability); // How much need to unreserve equals to: diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java index 82d665c2612..5b83c9af5cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSAppAttempt.java @@ -193,7 +193,7 @@ public class FSAppAttempt extends SchedulerApplicationAttempt Resources.subtractFrom(availableResources, node.getUnallocatedResource()); } - if (availableResources.getMemory() < 0) { + if (availableResources.getMemorySize() < 0) { availableResources.setMemory(0); } if (availableResources.getVirtualCores() < 0) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java index d9fac905de5..035c60cc59a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSParentQueue.java @@ -128,7 +128,7 @@ public class FSParentQueue extends FSQueue { public Resource getDemand() { readLock.lock(); try { - return Resource.newInstance(demand.getMemory(), demand.getVirtualCores()); + return Resource.newInstance(demand.getMemorySize(), demand.getVirtualCores()); } finally { readLock.unlock(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java index a0e9453f7ce..f50c35898ce 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueue.java @@ -135,18 +135,18 @@ public abstract class FSQueue implements Queue, Schedulable { QueueInfo queueInfo = recordFactory.newRecordInstance(QueueInfo.class); queueInfo.setQueueName(getQueueName()); - if (scheduler.getClusterResource().getMemory() == 0) { + if (scheduler.getClusterResource().getMemorySize() == 0) { queueInfo.setCapacity(0.0f); } else { - queueInfo.setCapacity((float) getFairShare().getMemory() / - 
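
The FSAppAttempt hunk above (like the CapacityHeadroomProvider one earlier) keeps the existing clamp-to-zero logic and only reads the value through getMemorySize(). The same pattern in isolation, with a plain long standing in for the Resource accessor and a made-up helper name:

    // Headroom can go slightly negative when an application is briefly
    // over its share; report zero instead of a negative value.
    static long nonNegativeHeadroomMb(long fairShareMb, long usedMb) {
      long headroomMb = fairShareMb - usedMb;
      return Math.max(headroomMb, 0L);
    }
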
scheduler.getClusterResource().getMemory()); + queueInfo.setCapacity((float) getFairShare().getMemorySize() / + scheduler.getClusterResource().getMemorySize()); } - if (getFairShare().getMemory() == 0) { + if (getFairShare().getMemorySize() == 0) { queueInfo.setCurrentCapacity(0.0f); } else { - queueInfo.setCurrentCapacity((float) getResourceUsage().getMemory() / - getFairShare().getMemory()); + queueInfo.setCurrentCapacity((float) getResourceUsage().getMemorySize() / + getFairShare().getMemorySize()); } ArrayList childQueueInfos = new ArrayList(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java index 109cce578ea..42c8825735d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSQueueMetrics.java @@ -24,6 +24,7 @@ import org.apache.hadoop.metrics2.annotation.Metric; import org.apache.hadoop.metrics2.annotation.Metrics; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.metrics2.lib.MutableGaugeInt; +import org.apache.hadoop.metrics2.lib.MutableGaugeLong; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.Queue; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; @@ -31,14 +32,14 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.QueueMetrics; @Metrics(context="yarn") public class FSQueueMetrics extends QueueMetrics { - @Metric("Fair share of memory in MB") MutableGaugeInt fairShareMB; - @Metric("Fair share of CPU in vcores") MutableGaugeInt fairShareVCores; - @Metric("Steady fair share of memory in MB") MutableGaugeInt steadyFairShareMB; - @Metric("Steady fair share of CPU in vcores") MutableGaugeInt steadyFairShareVCores; - @Metric("Minimum share of memory in MB") MutableGaugeInt minShareMB; - @Metric("Minimum share of CPU in vcores") MutableGaugeInt minShareVCores; - @Metric("Maximum share of memory in MB") MutableGaugeInt maxShareMB; - @Metric("Maximum share of CPU in vcores") MutableGaugeInt maxShareVCores; + @Metric("Fair share of memory in MB") MutableGaugeLong fairShareMB; + @Metric("Fair share of CPU in vcores") MutableGaugeLong fairShareVCores; + @Metric("Steady fair share of memory in MB") MutableGaugeLong steadyFairShareMB; + @Metric("Steady fair share of CPU in vcores") MutableGaugeLong steadyFairShareVCores; + @Metric("Minimum share of memory in MB") MutableGaugeLong minShareMB; + @Metric("Minimum share of CPU in vcores") MutableGaugeLong minShareVCores; + @Metric("Maximum share of memory in MB") MutableGaugeLong maxShareMB; + @Metric("Maximum share of CPU in vcores") MutableGaugeLong maxShareVCores; @Metric("Maximum number of applications") MutableGaugeInt maxApps; private String schedulingPolicy; @@ -49,54 +50,54 @@ public class FSQueueMetrics extends QueueMetrics { } public void setFairShare(Resource resource) { - fairShareMB.set(resource.getMemory()); + fairShareMB.set(resource.getMemorySize()); 
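
The FSQueue hunk above computes queue capacity as a float ratio of two long memory sizes, with a guard for the zero-memory case. A compact stand-alone version of that calculation (method and parameter names are illustrative, not from the patch):

    static float capacityFraction(long fairShareMb, long clusterMb) {
      if (clusterMb == 0) {
        return 0.0f;   // empty cluster: report 0 rather than Infinity/NaN
      }
      // Cast before dividing, as the patched code does, so the result is a
      // float ratio instead of integer division.
      return (float) fairShareMb / clusterMb;
    }
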
fairShareVCores.set(resource.getVirtualCores()); } - public int getFairShareMB() { + public long getFairShareMB() { return fairShareMB.value(); } - public int getFairShareVirtualCores() { + public long getFairShareVirtualCores() { return fairShareVCores.value(); } public void setSteadyFairShare(Resource resource) { - steadyFairShareMB.set(resource.getMemory()); + steadyFairShareMB.set(resource.getMemorySize()); steadyFairShareVCores.set(resource.getVirtualCores()); } - public int getSteadyFairShareMB() { + public long getSteadyFairShareMB() { return steadyFairShareMB.value(); } - public int getSteadyFairShareVCores() { + public long getSteadyFairShareVCores() { return steadyFairShareVCores.value(); } public void setMinShare(Resource resource) { - minShareMB.set(resource.getMemory()); + minShareMB.set(resource.getMemorySize()); minShareVCores.set(resource.getVirtualCores()); } - public int getMinShareMB() { + public long getMinShareMB() { return minShareMB.value(); } - public int getMinShareVirtualCores() { + public long getMinShareVirtualCores() { return minShareVCores.value(); } public void setMaxShare(Resource resource) { - maxShareMB.set(resource.getMemory()); + maxShareMB.set(resource.getMemorySize()); maxShareVCores.set(resource.getVirtualCores()); } - public int getMaxShareMB() { + public long getMaxShareMB() { return maxShareMB.value(); } - public int getMaxShareVirtualCores() { + public long getMaxShareVirtualCores() { return maxShareVCores.value(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 6a3143fbdfe..c8e84061d17 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -472,7 +472,7 @@ public class FairScheduler extends } private boolean isResourceGreaterThanNone(Resource toPreempt) { - return (toPreempt.getMemory() > 0) || (toPreempt.getVirtualCores() > 0); + return (toPreempt.getMemorySize() > 0) || (toPreempt.getVirtualCores() > 0); } protected void warnOrKillContainer(RMContainer container) { @@ -559,7 +559,7 @@ public class FairScheduler extends double weight = 1.0; if (sizeBasedWeight) { // Set weight based on current memory demand - weight = Math.log1p(app.getDemand().getMemory()) / Math.log(2); + weight = Math.log1p(app.getDemand().getMemorySize()) / Math.log(2); } weight *= app.getPriority().getPriority(); if (weightAdjuster != null) { @@ -1214,7 +1214,7 @@ public class FairScheduler extends if (preemptionEnabled) { Resource clusterResource = getClusterResource(); return (preemptionUtilizationThreshold < Math.max( - (float) rootMetrics.getAllocatedMB() / clusterResource.getMemory(), + (float) rootMetrics.getAllocatedMB() / clusterResource.getMemorySize(), (float) rootMetrics.getAllocatedVirtualCores() / clusterResource.getVirtualCores())); } diff --git 
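
The FairScheduler hunk above keeps the size-based weight formula and only switches the demand lookup to getMemorySize(): the weight is log2 of (1 + demand in MB). The same arithmetic as a stand-alone sketch, with a long standing in for the Resource accessor:

    // weight = log2(1 + demandMb): larger apps get a larger, but
    // sub-linear, weight.
    static double sizeBasedWeight(long demandMb) {
      return Math.log1p(demandMb) / Math.log(2);
    }
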
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java index f4fad32db5b..5f500cd9495 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/ComputeFairShares.java @@ -124,15 +124,15 @@ public class ComputeFairShares { // have met all Schedulables' max shares. int totalMaxShare = 0; for (Schedulable sched : schedulables) { - int maxShare = getResourceValue(sched.getMaxShare(), type); - totalMaxShare = (int) Math.min((long)maxShare + (long)totalMaxShare, + long maxShare = getResourceValue(sched.getMaxShare(), type); + totalMaxShare = (int) Math.min(maxShare + (long)totalMaxShare, Integer.MAX_VALUE); if (totalMaxShare == Integer.MAX_VALUE) { break; } } - int totalResource = Math.max((getResourceValue(totalResources, type) - + long totalResource = Math.max((getResourceValue(totalResources, type) - takenResources), 0); totalResource = Math.min(totalMaxShare, totalResource); @@ -207,7 +207,7 @@ public class ComputeFairShares { int totalResource = 0; for (Schedulable sched : schedulables) { - int fixedShare = getFairShareIfFixed(sched, isSteadyShare, type); + long fixedShare = getFairShareIfFixed(sched, isSteadyShare, type); if (fixedShare < 0) { nonFixedSchedulables.add(sched); } else { @@ -229,7 +229,7 @@ public class ComputeFairShares { * The fairshare is fixed if either the maxShare is 0, weight is 0, * or the Schedulable is not active for instantaneous fairshare. */ - private static int getFairShareIfFixed(Schedulable sched, + private static long getFairShareIfFixed(Schedulable sched, boolean isSteadyShare, ResourceType type) { // Check if maxShare is 0 @@ -245,17 +245,17 @@ public class ComputeFairShares { // Check if weight is 0 if (sched.getWeights().getWeight(type) <= 0) { - int minShare = getResourceValue(sched.getMinShare(), type); + long minShare = getResourceValue(sched.getMinShare(), type); return (minShare <= 0) ? 
0 : minShare; } return -1; } - private static int getResourceValue(Resource resource, ResourceType type) { + private static long getResourceValue(Resource resource, ResourceType type) { switch (type) { case MEMORY: - return resource.getMemory(); + return resource.getMemorySize(); case CPU: return resource.getVirtualCores(); default: @@ -263,7 +263,7 @@ public class ComputeFairShares { } } - private static void setResourceValue(int val, Resource resource, ResourceType type) { + private static void setResourceValue(long val, Resource resource, ResourceType type) { switch (type) { case MEMORY: resource.setMemory(val); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java index 45fbf982832..623437a3b1c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/DominantResourceFairnessPolicy.java @@ -101,13 +101,13 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy { @Override public Resource getHeadroom(Resource queueFairShare, Resource queueUsage, Resource maxAvailable) { - int queueAvailableMemory = - Math.max(queueFairShare.getMemory() - queueUsage.getMemory(), 0); + long queueAvailableMemory = + Math.max(queueFairShare.getMemorySize() - queueUsage.getMemorySize(), 0); int queueAvailableCPU = Math.max(queueFairShare.getVirtualCores() - queueUsage .getVirtualCores(), 0); Resource headroom = Resources.createResource( - Math.min(maxAvailable.getMemory(), queueAvailableMemory), + Math.min(maxAvailable.getMemorySize(), queueAvailableMemory), Math.min(maxAvailable.getVirtualCores(), queueAvailableCPU)); return headroom; @@ -180,8 +180,8 @@ public class DominantResourceFairnessPolicy extends SchedulingPolicy { */ void calculateShares(Resource resource, Resource pool, ResourceWeights shares, ResourceType[] resourceOrder, ResourceWeights weights) { - shares.setWeight(MEMORY, (float)resource.getMemory() / - (pool.getMemory() * weights.getWeight(MEMORY))); + shares.setWeight(MEMORY, (float)resource.getMemorySize() / + (pool.getMemorySize() * weights.getWeight(MEMORY))); shares.setWeight(CPU, (float)resource.getVirtualCores() / (pool.getVirtualCores() * weights.getWeight(CPU))); // sort order vector by resource share diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java index 3b9f07fb864..42d0420325f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java +++ 
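
In the ComputeFairShares hunks above, individual max shares become long while the accumulated total is still clamped at Integer.MAX_VALUE, so the addition is performed in long arithmetic before narrowing back to int. That pattern in isolation:

    // Add a (long) max share to a running int total without overflow:
    // sum as long, clamp to Integer.MAX_VALUE, then narrow.
    static int addSaturating(int runningTotal, long maxShare) {
      return (int) Math.min(maxShare + (long) runningTotal, Integer.MAX_VALUE);
    }
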
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FairSharePolicy.java @@ -82,13 +82,13 @@ public class FairSharePolicy extends SchedulingPolicy { s1.getResourceUsage(), minShare1); boolean s2Needy = Resources.lessThan(RESOURCE_CALCULATOR, null, s2.getResourceUsage(), minShare2); - minShareRatio1 = (double) s1.getResourceUsage().getMemory() - / Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE).getMemory(); - minShareRatio2 = (double) s2.getResourceUsage().getMemory() - / Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE).getMemory(); - useToWeightRatio1 = s1.getResourceUsage().getMemory() / + minShareRatio1 = (double) s1.getResourceUsage().getMemorySize() + / Resources.max(RESOURCE_CALCULATOR, null, minShare1, ONE).getMemorySize(); + minShareRatio2 = (double) s2.getResourceUsage().getMemorySize() + / Resources.max(RESOURCE_CALCULATOR, null, minShare2, ONE).getMemorySize(); + useToWeightRatio1 = s1.getResourceUsage().getMemorySize() / s1.getWeights().getWeight(ResourceType.MEMORY); - useToWeightRatio2 = s2.getResourceUsage().getMemory() / + useToWeightRatio2 = s2.getResourceUsage().getMemorySize() / s2.getWeights().getWeight(ResourceType.MEMORY); int res = 0; if (s1Needy && !s2Needy) @@ -124,10 +124,10 @@ public class FairSharePolicy extends SchedulingPolicy { @Override public Resource getHeadroom(Resource queueFairShare, Resource queueUsage, Resource maxAvailable) { - int queueAvailableMemory = Math.max( - queueFairShare.getMemory() - queueUsage.getMemory(), 0); + long queueAvailableMemory = Math.max( + queueFairShare.getMemorySize() - queueUsage.getMemorySize(), 0); Resource headroom = Resources.createResource( - Math.min(maxAvailable.getMemory(), queueAvailableMemory), + Math.min(maxAvailable.getMemorySize(), queueAvailableMemory), maxAvailable.getVirtualCores()); return headroom; } @@ -152,7 +152,7 @@ public class FairSharePolicy extends SchedulingPolicy { @Override public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMResource) { - return usage.getMemory() > maxAMResource.getMemory(); + return usage.getMemorySize() > maxAMResource.getMemorySize(); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java index a644e584ed4..c277df1d024 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/policies/FifoPolicy.java @@ -115,16 +115,16 @@ public class FifoPolicy extends SchedulingPolicy { @Override public boolean checkIfAMResourceUsageOverLimit(Resource usage, Resource maxAMResource) { - return usage.getMemory() > maxAMResource.getMemory(); + return usage.getMemorySize() > maxAMResource.getMemorySize(); } @Override public Resource getHeadroom(Resource queueFairShare, Resource queueUsage, Resource maxAvailable) { - int queueAvailableMemory = Math.max( - queueFairShare.getMemory() - 
queueUsage.getMemory(), 0); + long queueAvailableMemory = Math.max( + queueFairShare.getMemorySize() - queueUsage.getMemorySize(), 0); Resource headroom = Resources.createResource( - Math.min(maxAvailable.getMemory(), queueAvailableMemory), + Math.min(maxAvailable.getMemorySize(), queueAvailableMemory), maxAvailable.getVirtualCores()); return headroom; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index 3e6225ff881..796b0cfaeec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -143,11 +143,11 @@ public class FifoScheduler extends queueInfo.setQueueName(DEFAULT_QUEUE.getQueueName()); queueInfo.setCapacity(1.0f); Resource clusterResource = getClusterResource(); - if (clusterResource.getMemory() == 0) { + if (clusterResource.getMemorySize() == 0) { queueInfo.setCurrentCapacity(0.0f); } else { - queueInfo.setCurrentCapacity((float) usedResource.getMemory() - / clusterResource.getMemory()); + queueInfo.setCurrentCapacity((float) usedResource.getMemorySize() + / clusterResource.getMemorySize()); } queueInfo.setMaximumCapacity(1.0f); queueInfo.setChildQueues(new ArrayList()); @@ -685,7 +685,7 @@ public class FifoScheduler extends return assignedContainers; } - private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application, + private int assignContainer(FiCaSchedulerNode node, FiCaSchedulerApp application, Priority priority, int assignableContainers, ResourceRequest request, NodeType type) { LOG.debug("assignContainers:" + @@ -697,9 +697,10 @@ public class FifoScheduler extends Resource capability = request.getCapability(); // TODO: A buggy application with this zero would crash the scheduler. 
- int availableContainers = node.getUnallocatedResource().getMemory() / - capability.getMemory(); - int assignedContainers = + int availableContainers = + (int) (node.getUnallocatedResource().getMemorySize() / + capability.getMemorySize()); + int assignedContainers = Math.min(assignableContainers, availableContainers); if (assignedContainers > 0) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java index 04cd53afd0e..3cfcd7a6f1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/policy/FairOrderingPolicy.java @@ -23,7 +23,6 @@ import java.util.*; import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.*; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; /** @@ -67,10 +66,10 @@ public class FairOrderingPolicy extends AbstractCom private double getMagnitude(SchedulableEntity r) { double mag = r.getSchedulingResourceUsage().getCachedUsed( - CommonNodeLabelsManager.ANY).getMemory(); + CommonNodeLabelsManager.ANY).getMemorySize(); if (sizeBasedWeight) { double weight = Math.log1p(r.getSchedulingResourceUsage().getCachedDemand( - CommonNodeLabelsManager.ANY).getMemory()) / Math.log(2); + CommonNodeLabelsManager.ANY).getMemorySize()) / Math.log(2); mag = mag / weight; } return mag; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java index 1099bafb5ba..d4420640101 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/DefaultSchedulerPage.java @@ -54,8 +54,8 @@ class DefaultSchedulerPage extends RmView { @Override public void render(Block html) { info("\'" + sinfo.getQueueName() + "\' Queue Status"). _("Queue State:" , sinfo.getState()). - _("Minimum Queue Memory Capacity:" , Integer.toString(sinfo.getMinQueueMemoryCapacity())). - _("Maximum Queue Memory Capacity:" , Integer.toString(sinfo.getMaxQueueMemoryCapacity())). + _("Minimum Queue Memory Capacity:" , Long.toString(sinfo.getMinQueueMemoryCapacity())). + _("Maximum Queue Memory Capacity:" , Long.toString(sinfo.getMaxQueueMemoryCapacity())). _("Number of Nodes:" , Integer.toString(sinfo.getNumNodes())). _("Used Node Capacity:" , Integer.toString(sinfo.getUsedNodeCapacity())). 
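
The FifoScheduler hunk above divides two long memory sizes and narrows the resulting container count back to int. A stand-alone version of that computation, with plain longs standing in for the Resource getters:

    // How many containers of containerMb fit into the node's unallocated memory?
    // containerMb must be > 0; the hunk's TODO notes that a zero capability
    // would otherwise crash the scheduler with a division by zero.
    static int availableContainers(long unallocatedMb, long containerMb) {
      return (int) (unallocatedMb / containerMb);
    }
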
_("Available Node Capacity:" , Integer.toString(sinfo.getAvailNodeCapacity())). diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java index f257656107c..41b5fe7e4c3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/FairSchedulerAppsBlock.java @@ -115,7 +115,7 @@ public class FairSchedulerAppsBlock extends HtmlBlock { AppInfo appInfo = new AppInfo(rm, app, true, WebAppUtils.getHttpSchemePrefix(conf)); String percent = StringUtils.format("%.1f", appInfo.getProgress()); ApplicationAttemptId attemptId = app.getCurrentAppAttempt().getAppAttemptId(); - int fairShare = fsinfo.getAppFairShare(attemptId); + long fairShare = fsinfo.getAppFairShare(attemptId); if (fairShare == FairSchedulerInfo.INVALID_FAIR_SHARE) { // FairScheduler#applications don't have the entry. Skip it. continue; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java index a4c01df4483..7de1d469052 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/RMWebServices.java @@ -1514,14 +1514,14 @@ public class RMWebServices extends WebServices { String msg = "Requested more cores than configured max"; throw new BadRequestException(msg); } - if (newApp.getResource().getMemory() > rm.getConfig().getInt( + if (newApp.getResource().getMemorySize() > rm.getConfig().getInt( YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB)) { String msg = "Requested more memory than configured max"; throw new BadRequestException(msg); } Resource r = - Resource.newInstance(newApp.getResource().getMemory(), newApp + Resource.newInstance(newApp.getResource().getMemorySize(), newApp .getResource().getvCores()); return r; } @@ -2012,7 +2012,7 @@ public class RMWebServices extends WebServices { .getReservationRequest()) { ResourceInfo rInfo = resReqInfo.getCapability(); Resource capability = - Resource.newInstance(rInfo.getMemory(), rInfo.getvCores()); + Resource.newInstance(rInfo.getMemorySize(), rInfo.getvCores()); int numContainers = resReqInfo.getNumContainers(); int minConcurrency = resReqInfo.getMinConcurrency(); long duration = resReqInfo.getDuration(); @@ -2125,7 +2125,7 @@ public class RMWebServices extends WebServices { .getReservationRequest()) { ResourceInfo rInfo = resReqInfo.getCapability(); Resource capability = - Resource.newInstance(rInfo.getMemory(), rInfo.getvCores()); + 
Resource.newInstance(rInfo.getMemorySize(), rInfo.getvCores()); int numContainers = resReqInfo.getNumContainers(); int minConcurrency = resReqInfo.getMinConcurrency(); long duration = resReqInfo.getDuration(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java index f51197b83f0..63b601deab0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/AppInfo.java @@ -82,8 +82,8 @@ public class AppInfo { protected long elapsedTime; protected String amContainerLogs; protected String amHostHttpAddress; - protected int allocatedMB; - protected int allocatedVCores; + protected long allocatedMB; + protected long allocatedVCores; protected int runningContainers; protected long memorySeconds; protected long vcoreSeconds; @@ -91,8 +91,8 @@ public class AppInfo { protected float clusterUsagePercentage; // preemption info fields - protected int preemptedResourceMB; - protected int preemptedResourceVCores; + protected long preemptedResourceMB; + protected long preemptedResourceVCores; protected int numNonAMContainerPreempted; protected int numAMContainerPreempted; @@ -174,7 +174,7 @@ public class AppInfo { .getApplicationResourceUsageReport(); if (resourceReport != null) { Resource usedResources = resourceReport.getUsedResources(); - allocatedMB = usedResources.getMemory(); + allocatedMB = usedResources.getMemorySize(); allocatedVCores = usedResources.getVirtualCores(); runningContainers = resourceReport.getNumUsedContainers(); queueUsagePercentage = resourceReport.getQueueUsagePercentage(); @@ -190,7 +190,7 @@ public class AppInfo { numAMContainerPreempted = appMetrics.getNumAMContainersPreempted(); preemptedResourceMB = - appMetrics.getResourcePreempted().getMemory(); + appMetrics.getResourcePreempted().getMemorySize(); numNonAMContainerPreempted = appMetrics.getNumNonAMContainersPreempted(); preemptedResourceVCores = @@ -302,19 +302,19 @@ public class AppInfo { return this.runningContainers; } - public int getAllocatedMB() { + public long getAllocatedMB() { return this.allocatedMB; } - public int getAllocatedVCores() { + public long getAllocatedVCores() { return this.allocatedVCores; } - public int getPreemptedMB() { + public long getPreemptedMB() { return preemptedResourceMB; } - public int getPreemptedVCores() { + public long getPreemptedVCores() { return preemptedResourceVCores; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java index f97ff8ae64b..5355d4b9ef2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java +++ 
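
The RMWebServices hunk above compares a requested memory size, now a long, against an int configuration limit; Java promotes the int for the comparison, so no cast is needed. A sketch of that check with hypothetical names and a generic exception in place of BadRequestException:

    static void validateRequestedMemory(long requestedMb, int configuredMaxMb) {
      // The int limit is promoted to long, so oversized requests are
      // rejected instead of wrapping around.
      if (requestedMb > configuredMaxMb) {
        throw new IllegalArgumentException(
            "Requested more memory than configured max");
      }
    }
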
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerInfo.java @@ -54,10 +54,10 @@ public class FairSchedulerInfo extends SchedulerInfo { * FairSchedulerInfo#INVALID_FAIR_SHARE if the scheduler does * not know about this application attempt. */ - public int getAppFairShare(ApplicationAttemptId appAttemptId) { + public long getAppFairShare(ApplicationAttemptId appAttemptId) { FSAppAttempt fsAppAttempt = scheduler.getSchedulerApp(appAttemptId); return fsAppAttempt == null ? - INVALID_FAIR_SHARE : fsAppAttempt.getFairShare().getMemory(); + INVALID_FAIR_SHARE : fsAppAttempt.getFairShare().getMemorySize(); } public FairSchedulerQueueInfo getRootQueueInfo() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java index 5ab39e4a3a4..a09b92f25f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerQueueInfo.java @@ -83,8 +83,8 @@ public class FairSchedulerQueueInfo { usedResources = new ResourceInfo(queue.getResourceUsage()); demandResources = new ResourceInfo(queue.getDemand()); - fractionMemUsed = (float)usedResources.getMemory() / - clusterResources.getMemory(); + fractionMemUsed = (float)usedResources.getMemorySize() / + clusterResources.getMemorySize(); steadyFairResources = new ResourceInfo(queue.getSteadyFairShare()); fairResources = new ResourceInfo(queue.getFairShare()); @@ -95,11 +95,11 @@ public class FairSchedulerQueueInfo { scheduler.getClusterResource())); fractionMemSteadyFairShare = - (float)steadyFairResources.getMemory() / clusterResources.getMemory(); - fractionMemFairShare = (float) fairResources.getMemory() - / clusterResources.getMemory(); - fractionMemMinShare = (float)minResources.getMemory() / clusterResources.getMemory(); - fractionMemMaxShare = (float)maxResources.getMemory() / clusterResources.getMemory(); + (float)steadyFairResources.getMemorySize() / clusterResources.getMemorySize(); + fractionMemFairShare = (float) fairResources.getMemorySize() + / clusterResources.getMemorySize(); + fractionMemMinShare = (float)minResources.getMemorySize() / clusterResources.getMemorySize(); + fractionMemMaxShare = (float)maxResources.getMemorySize() / clusterResources.getMemorySize(); maxApps = allocConf.getQueueMaxApps(queueName); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java index bd940d1c3c9..1752546ea93 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FifoSchedulerInfo.java @@ -40,8 +40,8 @@ public class FifoSchedulerInfo extends SchedulerInfo { protected float capacity; protected float usedCapacity; protected QueueState qstate; - protected int minQueueMemoryCapacity; - protected int maxQueueMemoryCapacity; + protected long minQueueMemoryCapacity; + protected long maxQueueMemoryCapacity; protected int numNodes; protected int usedNodeCapacity; protected int availNodeCapacity; @@ -67,8 +67,8 @@ public class FifoSchedulerInfo extends SchedulerInfo { this.usedCapacity = qInfo.getCurrentCapacity(); this.capacity = qInfo.getCapacity(); - this.minQueueMemoryCapacity = fs.getMinimumResourceCapability().getMemory(); - this.maxQueueMemoryCapacity = fs.getMaximumResourceCapability().getMemory(); + this.minQueueMemoryCapacity = fs.getMinimumResourceCapability().getMemorySize(); + this.maxQueueMemoryCapacity = fs.getMaximumResourceCapability().getMemorySize(); this.qstate = qInfo.getQueueState(); this.numNodes = rmContext.getRMNodes().size(); @@ -79,9 +79,9 @@ public class FifoSchedulerInfo extends SchedulerInfo { for (RMNode ni : rmContext.getRMNodes().values()) { SchedulerNodeReport report = fs.getNodeReport(ni.getNodeID()); - this.usedNodeCapacity += report.getUsedResource().getMemory(); - this.availNodeCapacity += report.getAvailableResource().getMemory(); - this.totalNodeCapacity += ni.getTotalCapability().getMemory(); + this.usedNodeCapacity += report.getUsedResource().getMemorySize(); + this.availNodeCapacity += report.getAvailableResource().getMemorySize(); + this.totalNodeCapacity += ni.getTotalCapability().getMemorySize(); this.numContainers += fs.getNodeReport(ni.getNodeID()).getNumContainers(); } } @@ -114,11 +114,11 @@ public class FifoSchedulerInfo extends SchedulerInfo { return this.qName; } - public int getMinQueueMemoryCapacity() { + public long getMinQueueMemoryCapacity() { return this.minQueueMemoryCapacity; } - public int getMaxQueueMemoryCapacity() { + public long getMaxQueueMemoryCapacity() { return this.maxQueueMemoryCapacity; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java index 0f877f855a1..4a6aa4b47d7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/NodeInfo.java @@ -63,8 +63,8 @@ public class NodeInfo { this.availMemoryMB = 0; if (report != null) { this.numContainers = report.getNumContainers(); - this.usedMemoryMB = report.getUsedResource().getMemory(); - this.availMemoryMB = report.getAvailableResource().getMemory(); + this.usedMemoryMB = report.getUsedResource().getMemorySize(); + this.availMemoryMB = report.getAvailableResource().getMemorySize(); this.usedVirtualCores = report.getUsedResource().getVirtualCores(); this.availableVirtualCores = report.getAvailableResource().getVirtualCores(); } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java index 9510f5f5f04..76d43e0c4fe 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ResourceInfo.java @@ -27,22 +27,22 @@ import org.apache.hadoop.yarn.api.records.Resource; @XmlRootElement @XmlAccessorType(XmlAccessType.FIELD) public class ResourceInfo { - int memory; - int vCores; + long memory; + long vCores; public ResourceInfo() { } public ResourceInfo(Resource res) { - memory = res.getMemory(); + memory = res.getMemorySize(); vCores = res.getVirtualCores(); } - public int getMemory() { + public long getMemorySize() { return memory; } - public int getvCores() { + public long getvCores() { return vCores; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java index 17cf88593b3..60233518148 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java @@ -194,7 +194,7 @@ public class Application { Resource currentSpec = requestSpec.put(priority, capability); if (currentSpec != null) { throw new IllegalStateException("Resource spec already exists for " + - "priority " + priority.getPriority() + " - " + currentSpec.getMemory()); + "priority " + priority.getPriority() + " - " + currentSpec.getMemorySize()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java index 4cec29ef24a..04ea51c99ff 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java @@ -50,7 +50,7 @@ public class MockNM { private int responseId; private NodeId nodeId; - private int memory; + private long memory; private int vCores; private ResourceTrackerService resourceTracker; private int httpPort = 2; @@ -144,7 +144,7 @@ public class MockNM { this.currentNMTokenMasterKey = registrationResponse.getNMTokenMasterKey(); Resource newResource = registrationResponse.getResource(); if (newResource != null) { - memory = newResource.getMemory(); + memory = (int) 
newResource.getMemorySize(); vCores = newResource.getVirtualCores(); } return registrationResponse; @@ -219,14 +219,14 @@ public class MockNM { Resource newResource = heartbeatResponse.getResource(); if (newResource != null) { - memory = newResource.getMemory(); + memory = newResource.getMemorySize(); vCores = newResource.getVirtualCores(); } return heartbeatResponse; } - public int getMemory() { + public long getMemory() { return memory; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java index 6367b4b3687..e8454041c42 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNodes.java @@ -89,13 +89,13 @@ public class MockNodes { public static Resource newUsedResource(Resource total) { Resource rs = recordFactory.newRecordInstance(Resource.class); - rs.setMemory((int)(Math.random() * total.getMemory())); + rs.setMemory((int)(Math.random() * total.getMemorySize())); return rs; } public static Resource newAvailResource(Resource total, Resource used) { Resource rs = recordFactory.newRecordInstance(Resource.class); - rs.setMemory(total.getMemory() - used.getMemory()); + rs.setMemory(total.getMemorySize() - used.getMemorySize()); return rs; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index b4ebf9251cd..8c0a907579f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -213,12 +213,12 @@ public class NodeManager implements ContainerManagementProtocol { synchronized public void checkResourceUsage() { LOG.info("Checking resource usage for " + containerManagerAddress); - Assert.assertEquals(available.getMemory(), + Assert.assertEquals(available.getMemorySize(), resourceManager.getResourceScheduler().getNodeReport( - this.nodeId).getAvailableResource().getMemory()); - Assert.assertEquals(used.getMemory(), + this.nodeId).getAvailableResource().getMemorySize()); + Assert.assertEquals(used.getMemorySize(), resourceManager.getResourceScheduler().getNodeReport( - this.nodeId).getUsedResource().getMemory()); + this.nodeId).getUsedResource().getMemorySize()); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ReservationACLsTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ReservationACLsTestBase.java index 518da6105a9..aa5acc6279f 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ReservationACLsTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/ReservationACLsTestBase.java @@ -488,7 +488,7 @@ public class ReservationACLsTestBase extends ACLsTestBase { private boolean checkCapacity(Collection plans) { for (Plan plan : plans) { - if (plan.getTotalCapacity().getMemory() > 0) { + if (plan.getTotalCapacity().getMemorySize() > 0) { return true; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index d6d697ece03..fa1106c2304 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -387,11 +387,11 @@ public class TestApplicationACLs { Assert.assertEquals("Enemy should not see app reserved containers", -1, usageReport.getNumReservedContainers()); Assert.assertEquals("Enemy should not see app used resources", - -1, usageReport.getUsedResources().getMemory()); + -1, usageReport.getUsedResources().getMemorySize()); Assert.assertEquals("Enemy should not see app reserved resources", - -1, usageReport.getReservedResources().getMemory()); + -1, usageReport.getReservedResources().getMemorySize()); Assert.assertEquals("Enemy should not see app needed resources", - -1, usageReport.getNeededResources().getMemory()); + -1, usageReport.getNeededResources().getMemorySize()); } private void verifyInvalidQueueWithAcl() throws Exception { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java index 2b6edda2d22..a7d8ba25137 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationCleanup.java @@ -534,7 +534,7 @@ public class TestApplicationCleanup { // 4. Verify Memory Usage by cluster, it should be 3072. AM memory + // requested memory. 1024 + 2048=3072 ResourceScheduler rs = rm1.getRMContext().getScheduler(); - int allocatedMB = rs.getRootQueueMetrics().getAllocatedMB(); + long allocatedMB = rs.getRootQueueMetrics().getAllocatedMB(); Assert.assertEquals(amMemory + containerMemory, allocatedMB); // 5. 
Re-register NM by sending completed container status diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java index 430fc0f265b..c8a894f55f3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestContainerResourceUsage.java @@ -420,7 +420,7 @@ public class TestContainerResourceUsage { Resource resource = rmContainer.getContainer().getResource(); long usedMillis = rmContainer.getFinishTime() - rmContainer.getCreationTime(); - long memorySeconds = resource.getMemory() + long memorySeconds = resource.getMemorySize() * usedMillis / DateUtils.MILLIS_PER_SECOND; long vcoreSeconds = resource.getVirtualCores() * usedMillis / DateUtils.MILLIS_PER_SECOND; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDistributedSchedulingService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDistributedSchedulingService.java index 1982776d187..5d5ab789f86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDistributedSchedulingService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestDistributedSchedulingService.java @@ -161,7 +161,7 @@ public class TestDistributedSchedulingService { Assert.assertEquals(4, dsRegResp.getMaxAllocatableCapabilty().getVirtualCores()); Assert.assertEquals(1024, - dsRegResp.getMinAllocatableCapabilty().getMemory()); + dsRegResp.getMinAllocatableCapabilty().getMemorySize()); Assert.assertEquals(2, dsRegResp.getIncrAllocatableCapabilty().getVirtualCores()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java index 84549475d92..69c62e7a664 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRM.java @@ -21,7 +21,6 @@ package org.apache.hadoop.yarn.server.resourcemanager; import org.junit.Before; import static org.mockito.Matchers.argThat; import static org.mockito.Mockito.doNothing; -import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.spy; import java.util.ArrayList; @@ -45,7 +44,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationsResponse; import 
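
The TestContainerResourceUsage hunk above derives expected memory-seconds from the (now long) container memory. The same accounting in isolation; DateUtils.MILLIS_PER_SECOND from commons-lang is simply 1000:

    // memory-seconds = MB held by the container * seconds it was held.
    static long memorySeconds(long containerMb, long usedMillis) {
      return containerMb * usedMillis / 1000L;
    }
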
org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse; import org.apache.hadoop.yarn.api.protocolrecords.KillApplicationRequest; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -61,8 +59,6 @@ import org.apache.hadoop.yarn.event.AbstractEvent; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; -import org.apache.hadoop.yarn.server.resourcemanager.TestRMRestart.TestSecurityMockRM; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; @@ -79,8 +75,6 @@ import org.apache.log4j.LogManager; import org.apache.log4j.Logger; import org.junit.Test; import org.mockito.ArgumentMatcher; -import org.mockito.invocation.InvocationOnMock; -import org.mockito.stubbing.Answer; @SuppressWarnings({"unchecked", "rawtypes"}) public class TestRM extends ParameterizedSchedulerTestBase { @@ -112,7 +106,7 @@ public class TestRM extends ParameterizedSchedulerTestBase { GetNewApplicationResponse resp = rm.getNewAppId(); assert (resp.getApplicationId().getId() != 0); - assert (resp.getMaximumResourceCapability().getMemory() > 0); + assert (resp.getMaximumResourceCapability().getMemorySize() > 0); rm.stop(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java index abd59b249f5..905a42ce72b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMHA.java @@ -711,7 +711,7 @@ public class TestRMHA { } private void verifyClusterMetrics(int activeNodes, int appsSubmitted, - int appsPending, int containersPending, int availableMB, + int appsPending, int containersPending, long availableMB, int activeApplications) throws Exception { int timeoutSecs = 0; QueueMetrics metrics = rm.getResourceScheduler().getRootQueueMetrics(); @@ -742,7 +742,7 @@ public class TestRMHA { assertTrue(message, isAllMetricAssertionDone); } - private void assertMetric(String metricName, int expected, int actual) { + private void assertMetric(String metricName, long expected, long actual) { assertEquals("Incorrect value for metric " + metricName, expected, actual); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java index 7c03574cbcd..16fe99890d1 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMNodeTransitions.java @@ -31,7 +31,6 @@ import java.util.Collections; import java.util.List; import java.util.Random; -import org.apache.hadoop.net.Node; import org.apache.hadoop.util.HostsFileReader; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -50,7 +49,6 @@ import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer .AllocationExpirationInfo; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.ContainerAllocationExpirer; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeCleanContainerEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; @@ -869,13 +867,13 @@ public class TestRMNodeTransitions { public void testResourceUpdateOnRunningNode() { RMNodeImpl node = getRunningNode(); Resource oldCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096); + assertEquals("Memory resource is not match.", oldCapacity.getMemorySize(), 4096); assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4); node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), ResourceOption.newInstance(Resource.newInstance(2048, 2), ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT))); Resource newCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048); + assertEquals("Memory resource is not match.", newCapacity.getMemorySize(), 2048); assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2); Assert.assertEquals(NodeState.RUNNING, node.getState()); @@ -893,13 +891,13 @@ public class TestRMNodeTransitions { public void testResourceUpdateOnNewNode() { RMNodeImpl node = getNewNode(Resource.newInstance(4096, 4)); Resource oldCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096); + assertEquals("Memory resource is not match.", oldCapacity.getMemorySize(), 4096); assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4); node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), ResourceOption.newInstance(Resource.newInstance(2048, 2), ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT))); Resource newCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048); + assertEquals("Memory resource is not match.", newCapacity.getMemorySize(), 2048); assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2); Assert.assertEquals(NodeState.NEW, node.getState()); @@ -913,13 +911,13 @@ public class TestRMNodeTransitions { int initialUnHealthy = cm.getUnhealthyNMs(); int initialDecommissioning = cm.getNumDecommissioningNMs(); Resource oldCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096); + assertEquals("Memory resource is not match.", oldCapacity.getMemorySize(), 4096); assertEquals("CPU 
resource is not match.", oldCapacity.getVirtualCores(), 4); node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), ResourceOption .newInstance(Resource.newInstance(2048, 2), ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT))); Resource newCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048); + assertEquals("Memory resource is not match.", newCapacity.getMemorySize(), 2048); assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2); Assert.assertEquals(NodeState.REBOOTED, node.getState()); @@ -994,16 +992,16 @@ public class TestRMNodeTransitions { public void testResourceUpdateOnDecommissioningNode() { RMNodeImpl node = getDecommissioningNode(); Resource oldCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096); + assertEquals("Memory resource is not match.", oldCapacity.getMemorySize(), 4096); assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4); node.handle(new RMNodeResourceUpdateEvent(node.getNodeID(), ResourceOption.newInstance(Resource.newInstance(2048, 2), ResourceOption.OVER_COMMIT_TIMEOUT_MILLIS_DEFAULT))); Resource originalCapacity = node.getOriginalTotalCapability(); - assertEquals("Memory resource is not match.", originalCapacity.getMemory(), oldCapacity.getMemory()); + assertEquals("Memory resource is not match.", originalCapacity.getMemorySize(), oldCapacity.getMemorySize()); assertEquals("CPU resource is not match.", originalCapacity.getVirtualCores(), oldCapacity.getVirtualCores()); Resource newCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", newCapacity.getMemory(), 2048); + assertEquals("Memory resource is not match.", newCapacity.getMemorySize(), 2048); assertEquals("CPU resource is not match.", newCapacity.getVirtualCores(), 2); Assert.assertEquals(NodeState.DECOMMISSIONING, node.getState()); @@ -1016,7 +1014,7 @@ public class TestRMNodeTransitions { public void testResourceUpdateOnRecommissioningNode() { RMNodeImpl node = getDecommissioningNode(); Resource oldCapacity = node.getTotalCapability(); - assertEquals("Memory resource is not match.", oldCapacity.getMemory(), 4096); + assertEquals("Memory resource is not match.", oldCapacity.getMemorySize(), 4096); assertEquals("CPU resource is not match.", oldCapacity.getVirtualCores(), 4); node.handle(new RMNodeEvent(node.getNodeID(), RMNodeEventType.RECOMMISSION)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java index c13d72de354..5a6fe67e2b1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestReservationSystemWithRMHA.java @@ -193,7 +193,7 @@ public class TestReservationSystemWithRMHA extends RMHATestBase { .synchronizePlan(ReservationSystemTestUtil.reservationQ, false); if (rm.getRMContext().getReservationSystem() .getPlan(ReservationSystemTestUtil.reservationQ).getTotalCapacity() - .getMemory() > 0) 
{ + .getMemorySize() > 0) { break; } LOG.info("Waiting for node capacity to be added to plan"); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java index cac4511287a..73bef0c7b71 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java @@ -1058,7 +1058,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase { rm.drainEvents(); RMNode rmNode = rm.getRMContext().getRMNodes().get(nm1.getNodeId()); Assert.assertEquals(3, rmNode.getHttpPort()); - Assert.assertEquals(5120, rmNode.getTotalCapability().getMemory()); + Assert.assertEquals(5120, rmNode.getTotalCapability().getMemorySize()); Assert.assertEquals(5120 + 15360, metrics.getAvailableMB()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java index b6d6f691e60..458e8c3eceb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestWorkPreservingRMRestart.java @@ -87,7 +87,6 @@ import org.junit.Test; import java.io.File; import java.io.IOException; import java.net.UnknownHostException; -import java.security.PrivilegedAction; import java.security.PrivilegedExceptionAction; import java.util.ArrayList; import java.util.Arrays; @@ -417,15 +416,15 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase // ************* check Queue metrics ************ QueueMetrics queueMetrics = queue.getMetrics(); - assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(), - availableResources.getVirtualCores(), usedResource.getMemory(), + assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemorySize(), + availableResources.getVirtualCores(), usedResource.getMemorySize(), usedResource.getVirtualCores()); // ************ check user metrics *********** QueueMetrics userMetrics = queueMetrics.getUserMetrics(app.getUser()); - assertMetrics(userMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(), - availableResources.getVirtualCores(), usedResource.getMemory(), + assertMetrics(userMetrics, 1, 0, 1, 0, 2, availableResources.getMemorySize(), + availableResources.getVirtualCores(), usedResource.getMemorySize(), usedResource.getVirtualCores()); } @@ -485,8 +484,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase // ************ check queue metrics **************** QueueMetrics queueMetrics = scheduler.getRootQueueMetrics(); - 
assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(), - availableResources.getVirtualCores(), usedResources.getMemory(), + assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemorySize(), + availableResources.getVirtualCores(), usedResources.getMemorySize(), usedResources.getVirtualCores()); } @@ -697,8 +696,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase q1UsedResource, 4); QueueMetrics queue1Metrics = schedulerApp1_1.getQueue().getMetrics(); assertMetrics(queue1Metrics, 2, 0, 2, 0, 4, - q1availableResources.getMemory(), - q1availableResources.getVirtualCores(), q1UsedResource.getMemory(), + q1availableResources.getMemorySize(), + q1availableResources.getVirtualCores(), q1UsedResource.getMemorySize(), q1UsedResource.getVirtualCores()); // assert queue B state. @@ -708,8 +707,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase q2UsedResource, 2); QueueMetrics queue2Metrics = schedulerApp2.getQueue().getMetrics(); assertMetrics(queue2Metrics, 1, 0, 1, 0, 2, - q2availableResources.getMemory(), - q2availableResources.getVirtualCores(), q2UsedResource.getMemory(), + q2availableResources.getMemorySize(), + q2availableResources.getVirtualCores(), q2UsedResource.getMemorySize(), q2UsedResource.getVirtualCores()); // assert parent queue state. @@ -718,8 +717,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase checkParentQueue(parentQueue, 6, totalUsedResource, (float) 6 / 16, (float) 6 / 16); assertMetrics(parentQueue.getMetrics(), 3, 0, 3, 0, 6, - totalAvailableResource.getMemory(), - totalAvailableResource.getVirtualCores(), totalUsedResource.getMemory(), + totalAvailableResource.getMemorySize(), + totalAvailableResource.getVirtualCores(), totalUsedResource.getMemorySize(), totalUsedResource.getVirtualCores()); } @@ -1137,8 +1136,8 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase private void assertMetrics(QueueMetrics qm, int appsSubmitted, int appsPending, int appsRunning, int appsCompleted, - int allocatedContainers, int availableMB, int availableVirtualCores, - int allocatedMB, int allocatedVirtualCores) { + int allocatedContainers, long availableMB, long availableVirtualCores, + long allocatedMB, long allocatedVirtualCores) { assertEquals(appsSubmitted, qm.getAppsSubmitted()); assertEquals(appsPending, qm.getAppsPending()); assertEquals(appsRunning, qm.getAppsRunning()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java index 3c46f1a2abd..9ea2baa3dc0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/metrics/TestSystemMetricsPublisher.java @@ -424,10 +424,12 @@ public class TestSystemMetricsPublisher { container.getAllocatedNode().getPort(), entity.getOtherInfo().get( ContainerMetricsConstants.ALLOCATED_PORT_ENTITY_INFO)); - Assert.assertEquals( - container.getAllocatedResource().getMemory(), - 
entity.getOtherInfo().get( - ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO)); + Assert.assertEquals(container.getAllocatedResource().getMemorySize(), + // KeyValueBasedTimelineStore could cast long to integer, need make sure + // variables for compare have same type. + ((Integer) entity.getOtherInfo().get( + ContainerMetricsConstants.ALLOCATED_MEMORY_ENTITY_INFO)) + .longValue()); Assert.assertEquals( container.getAllocatedResource().getVirtualCores(), entity.getOtherInfo().get( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java index 1019548410f..7c8fb2ab222 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicyMockFramework.java @@ -653,12 +653,12 @@ public class ProportionalCapacityPreemptionPolicyMockFramework { public void checkPendingResource(CSQueue queue, String partition, int pending) { ResourceUsage ru = queue.getQueueResourceUsage(); - Assert.assertEquals(pending, ru.getPending(partition).getMemory()); + Assert.assertEquals(pending, ru.getPending(partition).getMemorySize()); } public void checkReservedResource(CSQueue queue, String partition, int reserved) { ResourceUsage ru = queue.getQueueResourceUsage(); - Assert.assertEquals(reserved, ru.getReserved(partition).getMemory()); + Assert.assertEquals(reserved, ru.getReserved(partition).getMemorySize()); } static class IsPreemptionRequestForQueueAndNode diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java index 5b7ac521ae5..e3ef8c28fd4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicy.java @@ -955,14 +955,17 @@ public class TestProportionalCapacityPreemptionPolicy { // which is likely triggered since we use small numbers for readability //run with Logger.getRootLogger().setLevel(Level.DEBUG); verify(mDisp, times(9)).handle(argThat(new IsPreemptionRequestFor(appC))); - assertEquals(10, policy.getQueuePartitions().get("queueE").get("").preemptableExtra.getMemory()); + assertEquals(10, policy.getQueuePartitions().get("queueE").get("").preemptableExtra.getMemorySize()); //2nd level child(E) preempts 10, but 
parent A has only 9 extra //check the parent can prempt only the extra from > 2 level child - TempQueuePerPartition tempQueueAPartition = policy.getQueuePartitions().get("queueA").get(""); - assertEquals(0, tempQueueAPartition.untouchableExtra.getMemory()); - int extraForQueueA = tempQueueAPartition.getUsed().getMemory() - - tempQueueAPartition.getGuaranteed().getMemory(); - assertEquals(extraForQueueA,tempQueueAPartition.preemptableExtra.getMemory()); + TempQueuePerPartition tempQueueAPartition = policy.getQueuePartitions().get( + "queueA").get(""); + assertEquals(0, tempQueueAPartition.untouchableExtra.getMemorySize()); + long extraForQueueA = + tempQueueAPartition.getUsed().getMemorySize() - tempQueueAPartition + .getGuaranteed().getMemorySize(); + assertEquals(extraForQueueA, + tempQueueAPartition.preemptableExtra.getMemorySize()); } @Test @@ -985,14 +988,18 @@ public class TestProportionalCapacityPreemptionPolicy { policy.editSchedule(); verify(mDisp, times(10)).handle(argThat(new IsPreemptionRequestFor(appC))); - assertEquals(10, policy.getQueuePartitions().get("queueE").get("").preemptableExtra.getMemory()); + assertEquals(10, policy.getQueuePartitions().get("queueE") + .get("").preemptableExtra.getMemorySize()); //2nd level child(E) preempts 10, but parent A has only 9 extra //check the parent can prempt only the extra from > 2 level child - TempQueuePerPartition tempQueueAPartition = policy.getQueuePartitions().get("queueA").get(""); - assertEquals(0, tempQueueAPartition.untouchableExtra.getMemory()); - int extraForQueueA = tempQueueAPartition.getUsed().getMemory() - - tempQueueAPartition.getGuaranteed().getMemory(); - assertEquals(extraForQueueA,tempQueueAPartition.preemptableExtra.getMemory()); + TempQueuePerPartition tempQueueAPartition = policy.getQueuePartitions().get( + "queueA").get(""); + assertEquals(0, tempQueueAPartition.untouchableExtra.getMemorySize()); + long extraForQueueA = + tempQueueAPartition.getUsed().getMemorySize() - tempQueueAPartition + .getGuaranteed().getMemorySize(); + assertEquals(extraForQueueA, + tempQueueAPartition.preemptableExtra.getMemorySize()); } static class IsPreemptionRequestFor @@ -1122,12 +1129,12 @@ public class TestProportionalCapacityPreemptionPolicy { when(root.getAbsoluteCapacity()).thenReturn( Resources.divide(rc, tot, abs[0], tot)); when(root.getAbsoluteMaximumCapacity()).thenReturn( - maxCap[0] / (float) tot.getMemory()); + maxCap[0] / (float) tot.getMemorySize()); when(root.getQueueResourceUsage()).thenReturn(resUsage); QueueCapacities rootQc = new QueueCapacities(true); rootQc.setAbsoluteUsedCapacity(Resources.divide(rc, tot, used[0], tot)); rootQc.setAbsoluteCapacity(Resources.divide(rc, tot, abs[0], tot)); - rootQc.setAbsoluteMaximumCapacity(maxCap[0] / (float) tot.getMemory()); + rootQc.setAbsoluteMaximumCapacity(maxCap[0] / (float) tot.getMemorySize()); when(root.getQueueCapacities()).thenReturn(rootQc); when(root.getQueuePath()).thenReturn(CapacitySchedulerConfiguration.ROOT); boolean preemptionDisabled = mockPreemptionStatus("root"); @@ -1153,13 +1160,13 @@ public class TestProportionalCapacityPreemptionPolicy { when(q.getAbsoluteCapacity()).thenReturn( Resources.divide(rc, tot, abs[i], tot)); when(q.getAbsoluteMaximumCapacity()).thenReturn( - maxCap[i] / (float) tot.getMemory()); + maxCap[i] / (float) tot.getMemorySize()); // We need to make these fields to QueueCapacities QueueCapacities qc = new QueueCapacities(false); qc.setAbsoluteUsedCapacity(Resources.divide(rc, tot, used[i], tot)); 
qc.setAbsoluteCapacity(Resources.divide(rc, tot, abs[i], tot)); - qc.setAbsoluteMaximumCapacity(maxCap[i] / (float) tot.getMemory()); + qc.setAbsoluteMaximumCapacity(maxCap[i] / (float) tot.getMemorySize()); when(q.getQueueCapacities()).thenReturn(qc); String parentPathName = p.getQueuePath(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java index 08042b5bddf..07d1eefa2f6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/TestProportionalCapacityPreemptionPolicyMockFramework.java @@ -230,17 +230,17 @@ public class TestProportionalCapacityPreemptionPolicyMockFramework // Check host resources Assert.assertEquals(3, this.cs.getAllNodes().size()); SchedulerNode node1 = cs.getSchedulerNode(NodeId.newInstance("n1", 1)); - Assert.assertEquals(100, node1.getTotalResource().getMemory()); + Assert.assertEquals(100, node1.getTotalResource().getMemorySize()); Assert.assertEquals(100, node1.getCopiedListOfRunningContainers().size()); Assert.assertNull(node1.getReservedContainer()); SchedulerNode node2 = cs.getSchedulerNode(NodeId.newInstance("n2", 1)); - Assert.assertEquals(0, node2.getTotalResource().getMemory()); + Assert.assertEquals(0, node2.getTotalResource().getMemorySize()); Assert.assertEquals(50, node2.getCopiedListOfRunningContainers().size()); Assert.assertNotNull(node2.getReservedContainer()); SchedulerNode node3 = cs.getSchedulerNode(NodeId.newInstance("n3", 1)); - Assert.assertEquals(30, node3.getTotalResource().getMemory()); + Assert.assertEquals(30, node3.getTotalResource().getMemorySize()); Assert.assertEquals(100, node3.getCopiedListOfRunningContainers().size()); Assert.assertNull(node3.getReservedContainer()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java index 4a6abbe7770..60d8ce1863b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/nodelabels/TestRMNodeLabelsManager.java @@ -580,7 +580,7 @@ public class TestRMNodeLabelsManager extends NodeLabelTestBase { for (RMNodeLabel info : infos) { if (info.getLabelName().equals(labelName)) { Assert.assertEquals(activeNMs, info.getNumActiveNMs()); - Assert.assertEquals(memory, info.getResource().getMemory()); + Assert.assertEquals(memory, 
info.getResource().getMemorySize()); return; } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java index 9ea1044b01d..24c386a7170 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/ReservationSystemTestUtil.java @@ -90,7 +90,7 @@ public class ReservationSystemTestUtil { Assert.assertNotNull(plan); Assert.assertTrue(plan instanceof InMemoryPlan); Assert.assertEquals(planQName, plan.getQueueName()); - Assert.assertEquals(8192, plan.getTotalCapacity().getMemory()); + Assert.assertEquals(8192, plan.getTotalCapacity().getMemorySize()); Assert.assertTrue( plan.getReservationAgent() instanceof AlignedPlannerWithGreedy); Assert diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java index 85fafa7ecec..b526484f6bf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/TestRLESparseResourceAllocation.java @@ -349,24 +349,24 @@ public class TestRLESparseResourceAllocation { // does not handle removal of "partial" // allocations correctly. 
Assert.assertEquals(102400, rleSparseVector.getCapacityAtTime(10) - .getMemory()); - Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemory()); - Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(19).getMemory()); + .getMemorySize()); + Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemorySize()); + Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(19).getMemorySize()); Assert.assertEquals(102400, rleSparseVector.getCapacityAtTime(21) - .getMemory()); + .getMemorySize()); Assert.assertEquals(2 * 102400, rleSparseVector.getCapacityAtTime(26) - .getMemory()); + .getMemorySize()); ReservationInterval riRemove2 = new ReservationInterval(9, 13); rleSparseVector.removeInterval(riRemove2, rr); LOG.info(rleSparseVector.toString()); - Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(11).getMemory()); + Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(11).getMemorySize()); Assert.assertEquals(-102400, rleSparseVector.getCapacityAtTime(9) - .getMemory()); - Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemory()); + .getMemorySize()); + Assert.assertEquals(0, rleSparseVector.getCapacityAtTime(13).getMemorySize()); Assert.assertEquals(102400, rleSparseVector.getCapacityAtTime(20) - .getMemory()); + .getMemorySize()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java index eb0b0e26fee..d762b360e37 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/reservation/planning/TestSimpleCapacityReplanner.java @@ -153,9 +153,9 @@ public class TestSimpleCapacityReplanner { // check resources at each moment in time no more exceed capacity for (int i = 0; i < 20; i++) { - int tot = 0; + long tot = 0; for (ReservationAllocation r : plan.getReservationsAtTime(i)) { - tot = r.getResourcesAtTime(i).getMemory(); + tot = r.getResourcesAtTime(i).getMemorySize(); } assertTrue(tot <= 70 * 1024); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java index 12f3ee4f8c8..3c3d8781466 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestAbstractYarnScheduler.java @@ -132,38 +132,38 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase { Assert.assertEquals(6, expectedMaxMemory.length); Assert.assertEquals(0, 
scheduler.getNumClusterNodes()); - int maxMemory = scheduler.getMaximumResourceCapability().getMemory(); + long maxMemory = scheduler.getMaximumResourceCapability().getMemorySize(); Assert.assertEquals(expectedMaxMemory[0], maxMemory); RMNode node1 = MockNodes.newNodeInfo( 0, Resources.createResource(node1MaxMemory), 1, "127.0.0.2"); scheduler.handle(new NodeAddedSchedulerEvent(node1)); Assert.assertEquals(1, scheduler.getNumClusterNodes()); - maxMemory = scheduler.getMaximumResourceCapability().getMemory(); + maxMemory = scheduler.getMaximumResourceCapability().getMemorySize(); Assert.assertEquals(expectedMaxMemory[1], maxMemory); scheduler.handle(new NodeRemovedSchedulerEvent(node1)); Assert.assertEquals(0, scheduler.getNumClusterNodes()); - maxMemory = scheduler.getMaximumResourceCapability().getMemory(); + maxMemory = scheduler.getMaximumResourceCapability().getMemorySize(); Assert.assertEquals(expectedMaxMemory[2], maxMemory); RMNode node2 = MockNodes.newNodeInfo( 0, Resources.createResource(node2MaxMemory), 2, "127.0.0.3"); scheduler.handle(new NodeAddedSchedulerEvent(node2)); Assert.assertEquals(1, scheduler.getNumClusterNodes()); - maxMemory = scheduler.getMaximumResourceCapability().getMemory(); + maxMemory = scheduler.getMaximumResourceCapability().getMemorySize(); Assert.assertEquals(expectedMaxMemory[3], maxMemory); RMNode node3 = MockNodes.newNodeInfo( 0, Resources.createResource(node3MaxMemory), 3, "127.0.0.4"); scheduler.handle(new NodeAddedSchedulerEvent(node3)); Assert.assertEquals(2, scheduler.getNumClusterNodes()); - maxMemory = scheduler.getMaximumResourceCapability().getMemory(); + maxMemory = scheduler.getMaximumResourceCapability().getMemorySize(); Assert.assertEquals(expectedMaxMemory[4], maxMemory); scheduler.handle(new NodeRemovedSchedulerEvent(node3)); Assert.assertEquals(1, scheduler.getNumClusterNodes()); - maxMemory = scheduler.getMaximumResourceCapability().getMemory(); + maxMemory = scheduler.getMaximumResourceCapability().getMemorySize(); Assert.assertEquals(expectedMaxMemory[5], maxMemory); scheduler.handle(new NodeRemovedSchedulerEvent(node2)); @@ -617,8 +617,8 @@ public class TestAbstractYarnScheduler extends ParameterizedSchedulerTestBase { final Resource schedulerMaximumResourceCapability = scheduler .getMaximumResourceCapability(); - Assert.assertEquals(expectedMaximumResource.getMemory(), - schedulerMaximumResourceCapability.getMemory()); + Assert.assertEquals(expectedMaximumResource.getMemorySize(), + schedulerMaximumResourceCapability.getMemorySize()); Assert.assertEquals(expectedMaximumResource.getVirtualCores(), schedulerMaximumResourceCapability.getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java index c772ae142c7..62b1082f534 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestQueueMetrics.java @@ -401,10 +401,10 @@ public class TestQueueMetrics { assertCounter("AppsKilled", killed, rb); } - public static void 
checkResources(MetricsSource source, int allocatedMB, - int allocatedCores, int allocCtnrs, long aggreAllocCtnrs, - long aggreReleasedCtnrs, int availableMB, int availableCores, int pendingMB, - int pendingCores, int pendingCtnrs, int reservedMB, int reservedCores, + public static void checkResources(MetricsSource source, long allocatedMB, + long allocatedCores, int allocCtnrs, long aggreAllocCtnrs, + long aggreReleasedCtnrs, long availableMB, long availableCores, long pendingMB, + long pendingCores, int pendingCtnrs, long reservedMB, long reservedCores, int reservedCtnrs) { MetricsRecordBuilder rb = getMetrics(source); assertGauge("AllocatedMB", allocatedMB, rb); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java index 1ddeb0b33b8..5c9e320a076 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestResourceUsage.java @@ -135,7 +135,7 @@ public class TestResourceUsage { } void check(int mem, int cpu, Resource res) { - Assert.assertEquals(mem, res.getMemory()); + Assert.assertEquals(mem, res.getMemorySize()); Assert.assertEquals(cpu, res.getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java index dede48f7c06..63f97c5348c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/TestSchedulerUtils.java @@ -114,37 +114,37 @@ public class TestSchedulerUtils { ask.setCapability(Resources.createResource(-1024)); SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(minMemory, ask.getCapability().getMemory()); + assertEquals(minMemory, ask.getCapability().getMemorySize()); // case zero memory ask.setCapability(Resources.createResource(0)); SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(minMemory, ask.getCapability().getMemory()); + assertEquals(minMemory, ask.getCapability().getMemorySize()); // case memory is a multiple of minMemory ask.setCapability(Resources.createResource(2 * minMemory)); SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(2 * minMemory, ask.getCapability().getMemory()); + assertEquals(2 * minMemory, ask.getCapability().getMemorySize()); // case memory is not a multiple of minMemory ask.setCapability(Resources.createResource(minMemory + 10)); 
SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(2 * minMemory, ask.getCapability().getMemory()); + assertEquals(2 * minMemory, ask.getCapability().getMemorySize()); // case memory is equal to max allowed ask.setCapability(Resources.createResource(maxMemory)); SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(maxMemory, ask.getCapability().getMemory()); + assertEquals(maxMemory, ask.getCapability().getMemorySize()); // case memory is just less than max ask.setCapability(Resources.createResource(maxMemory - 10)); SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(maxMemory, ask.getCapability().getMemory()); + assertEquals(maxMemory, ask.getCapability().getMemorySize()); // max is not a multiple of min maxResource = Resources.createResource(maxMemory - 10, 0); @@ -152,14 +152,14 @@ public class TestSchedulerUtils { // multiple of minMemory > maxMemory, then reduce to maxMemory SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(maxResource.getMemory(), ask.getCapability().getMemory()); + assertEquals(maxResource.getMemorySize(), ask.getCapability().getMemorySize()); // ask is more than max maxResource = Resources.createResource(maxMemory, 0); ask.setCapability(Resources.createResource(maxMemory + 100)); SchedulerUtils.normalizeRequest(ask, resourceCalculator, null, minResource, maxResource); - assertEquals(maxResource.getMemory(), ask.getCapability().getMemory()); + assertEquals(maxResource.getMemorySize(), ask.getCapability().getMemorySize()); } @Test (timeout = 30000) @@ -184,7 +184,7 @@ public class TestSchedulerUtils { ask, resourceCalculator, clusterResource, minResource, maxResource); assertEquals(minResource, ask.getCapability()); assertEquals(1, ask.getCapability().getVirtualCores()); - assertEquals(1024, ask.getCapability().getMemory()); + assertEquals(1024, ask.getCapability().getMemorySize()); // case non-zero memory & zero cores ask.setCapability(Resources.createResource(1536, 0)); @@ -192,7 +192,7 @@ public class TestSchedulerUtils { ask, resourceCalculator, clusterResource, minResource, maxResource); assertEquals(Resources.createResource(2048, 1), ask.getCapability()); assertEquals(1, ask.getCapability().getVirtualCores()); - assertEquals(2048, ask.getCapability().getMemory()); + assertEquals(2048, ask.getCapability().getMemorySize()); } @Test(timeout = 30000) diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java index 8a3051b0f51..175f5bbdc1a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationLimits.java @@ -292,12 +292,12 @@ public class TestApplicationLimits { // Assert in metrics assertEquals(queue.getMetrics().getAMResourceLimitMB(), - amResourceLimit.getMemory()); + 
amResourceLimit.getMemorySize()); assertEquals(queue.getMetrics().getAMResourceLimitVCores(), amResourceLimit.getVirtualCores()); assertEquals( - (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()), + (int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()), queue.getMetrics().getAvailableMB() ); @@ -312,7 +312,7 @@ public class TestApplicationLimits { Resource.newInstance(96*GB, 1)); assertEquals( - (int)(clusterResource.getMemory() * queue.getAbsoluteCapacity()), + (int)(clusterResource.getMemorySize() * queue.getAbsoluteCapacity()), queue.getMetrics().getAvailableMB() ); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java index d8161f824c4..9bb8827e703 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestApplicationPriority.java @@ -171,13 +171,13 @@ public class TestApplicationPriority { 7, 2 * GB, nm1); Assert.assertEquals(7, allocated1.size()); - Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory()); + Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize()); // check node report, 15 GB used (1 AM and 7 containers) and 1 GB available SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); - Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemorySize()); // Submit the second app App2 with priority 8 (Higher than App1) Priority appPriority2 = Priority.newInstance(8); @@ -189,8 +189,8 @@ public class TestApplicationPriority { // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize()); // get scheduler CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); @@ -210,8 +210,8 @@ public class TestApplicationPriority { // check node report, 12 GB used and 4 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemorySize()); // send updated request for App1 am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList()); @@ -226,8 +226,8 @@ public class TestApplicationPriority { // check node 
report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize()); rm.stop(); } @@ -256,13 +256,13 @@ public class TestApplicationPriority { 7, 1 * GB, nm1); Assert.assertEquals(7, allocated1.size()); - Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory()); + Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemorySize()); // check node report, 8 GB used (1 AM and 7 containers) and 0 GB available SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); - Assert.assertEquals(8 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(8 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize()); // Submit the second app App2 with priority 7 Priority appPriority2 = Priority.newInstance(7); @@ -287,8 +287,8 @@ public class TestApplicationPriority { // check node report, 1 GB used and 7 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(7 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(7 * GB, report_nm1.getAvailableResource().getMemorySize()); rm.stop(); } @@ -477,13 +477,13 @@ public class TestApplicationPriority { NUM_CONTAINERS, 2 * GB, nm1); Assert.assertEquals(7, allocated1.size()); - Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory()); + Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize()); // check node report, 15 GB used (1 AM and 7 containers) and 1 GB available SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); - Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(15 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(1 * GB, report_nm1.getAvailableResource().getMemorySize()); // Submit the second app App2 with priority 8 (Higher than App1) Priority appPriority2 = Priority.newInstance(8); @@ -495,8 +495,8 @@ public class TestApplicationPriority { // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize()); // get scheduler CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); @@ -518,8 +518,8 @@ public class TestApplicationPriority { // check node report, 12 GB used and 4 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(4 * GB, 
report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(12 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(4 * GB, report_nm1.getAvailableResource().getMemorySize()); // add request for containers App1 am1.allocate("127.0.0.1", 2 * GB, 10, new ArrayList()); @@ -531,8 +531,8 @@ public class TestApplicationPriority { Assert.assertEquals(2, allocated2.size()); // check node report, 16 GB used and 0 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(16 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize()); // kill 1 more counter = 0; @@ -548,8 +548,8 @@ public class TestApplicationPriority { // check node report, 14 GB used and 2 GB available report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(14 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(14 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemorySize()); // Change the priority of App1 to 3 (lowest) Priority appPriority3 = Priority.newInstance(3); @@ -617,7 +617,7 @@ public class TestApplicationPriority { ResourceScheduler scheduler = rm1.getRMContext().getScheduler(); LeafQueue defaultQueue = (LeafQueue) ((CapacityScheduler) scheduler).getQueue("default"); - int memory = defaultQueue.getAMResourceLimit().getMemory() / 2; + int memory = (int) (defaultQueue.getAMResourceLimit().getMemorySize() / 2); // App-1 with priority 5 submitted and running Priority appPriority1 = Priority.newInstance(5); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java index 16063b55c6a..72d2f85cad0 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java @@ -508,12 +508,12 @@ public class TestCapacityScheduler { private void checkApplicationResourceUsage(int expected, Application application) { - Assert.assertEquals(expected, application.getUsedResources().getMemory()); + Assert.assertEquals(expected, application.getUsedResources().getMemorySize()); } private void checkNodeResourceUsage(int expected, org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) { - Assert.assertEquals(expected, node.getUsed().getMemory()); + Assert.assertEquals(expected, node.getUsed().getMemorySize()); node.checkResourceUsage(); } @@ -562,14 +562,14 @@ public class TestCapacityScheduler { cs.handle(new NodeAddedSchedulerEvent(n1)); cs.handle(new NodeAddedSchedulerEvent(n2)); - Assert.assertEquals(6 * GB, cs.getClusterResource().getMemory()); + Assert.assertEquals(6 * 
GB, cs.getClusterResource().getMemorySize()); // reconnect n1 with downgraded memory n1 = MockNodes.newNodeInfo(0, MockNodes.newResource(2 * GB), 1); cs.handle(new NodeRemovedSchedulerEvent(n1)); cs.handle(new NodeAddedSchedulerEvent(n1)); - Assert.assertEquals(4 * GB, cs.getClusterResource().getMemory()); + Assert.assertEquals(4 * GB, cs.getClusterResource().getMemorySize()); cs.stop(); } @@ -830,8 +830,8 @@ public class TestCapacityScheduler { SchedulerNodeReport report_nm1 = rm.getResourceScheduler().getNodeReport( nm1.getNodeId()); // check node report, 2 GB used and 2 GB available - Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemorySize()); // add request for containers am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1); @@ -847,17 +847,17 @@ public class TestCapacityScheduler { List allocated1 = alloc1Response.getAllocatedContainers(); Assert.assertEquals(1, allocated1.size()); - Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory()); + Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize()); Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report, 4 GB used and 0 GB available - Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory()); - Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize()); + Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize()); // check container is assigned with 2 GB. Container c1 = allocated1.get(0); - Assert.assertEquals(2 * GB, c1.getResource().getMemory()); + Assert.assertEquals(2 * GB, c1.getResource().getMemorySize()); // update node resource to 2 GB, so resource is over-consumed. Map nodeResourceMap = @@ -872,7 +872,7 @@ public class TestCapacityScheduler { waitCount = 0; while (waitCount++ != 20) { report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - if (report_nm1.getAvailableResource().getMemory() != 0) { + if (report_nm1.getAvailableResource().getMemorySize() != 0) { break; } LOG.info("Waiting for RMNodeResourceUpdateEvent to be handled... Tried " @@ -880,8 +880,9 @@ public class TestCapacityScheduler { Thread.sleep(1000); } // Now, the used resource is still 4 GB, and available resource is minus value. - Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory()); + report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); + Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemorySize()); // Check container can complete successfully in case of resource over-commitment. 
ContainerStatus containerStatus = BuilderUtils.newContainerStatus( @@ -897,9 +898,9 @@ public class TestCapacityScheduler { Assert.assertEquals(1, attempt1.getJustFinishedContainers().size()); Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses().size()); report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); - Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory()); + Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize()); // As container return 2 GB back, the available resource becomes 0 again. - Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize()); // Verify no NPE is trigger in schedule after resource is updated. am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 3 * GB, 1, 1); @@ -1836,16 +1837,16 @@ public class TestCapacityScheduler { assertEquals(1, newNumAppsA); assertEquals(2, newNumAppsRoot); // original consumption on a1 - assertEquals(3 * GB, origOldA1.getResourcesUsed().getMemory()); + assertEquals(3 * GB, origOldA1.getResourcesUsed().getMemorySize()); assertEquals(1, origOldA1.getResourcesUsed().getvCores()); - assertEquals(0, origNewA1.getResourcesUsed().getMemory()); // after the move + assertEquals(0, origNewA1.getResourcesUsed().getMemorySize()); // after the move assertEquals(0, origNewA1.getResourcesUsed().getvCores()); // after the move // app moved here with live containers - assertEquals(3 * GB, targetNewA2.getResourcesUsed().getMemory()); + assertEquals(3 * GB, targetNewA2.getResourcesUsed().getMemorySize()); assertEquals(1, targetNewA2.getResourcesUsed().getvCores()); // it was empty before the move assertEquals(0, targetOldA2.getNumApplications()); - assertEquals(0, targetOldA2.getResourcesUsed().getMemory()); + assertEquals(0, targetOldA2.getResourcesUsed().getMemorySize()); assertEquals(0, targetOldA2.getResourcesUsed().getvCores()); // after the app moved here assertEquals(1, targetNewA2.getNumApplications()); @@ -1859,7 +1860,7 @@ public class TestCapacityScheduler { assertEquals(0, targetOldA2.getNumContainers()); // 1 user with 3GB assertEquals(3 * GB, origOldA1.getUsers().getUsersList().get(0) - .getResourcesUsed().getMemory()); + .getResourcesUsed().getMemorySize()); // 1 user with 1 core assertEquals(1, origOldA1.getUsers().getUsersList().get(0) .getResourcesUsed().getvCores()); @@ -1867,7 +1868,7 @@ public class TestCapacityScheduler { assertEquals(0, origNewA1.getUsers().getUsersList().size()); // 1 user with 3GB assertEquals(3 * GB, targetNewA2.getUsers().getUsersList().get(0) - .getResourcesUsed().getMemory()); + .getResourcesUsed().getMemorySize()); // 1 user with 1 core assertEquals(1, targetNewA2.getUsers().getUsersList().get(0) .getResourcesUsed().getvCores()); @@ -2215,8 +2216,8 @@ public class TestCapacityScheduler { rm.getResourceScheduler().getNodeReport(nm1.getNodeId()); // check node report - Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemory()); - Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemory()); + Assert.assertEquals(1 * GB, report_nm1.getUsedResource().getMemorySize()); + Assert.assertEquals(9 * GB, report_nm1.getAvailableResource().getMemorySize()); // add request for containers am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 1 * GB, 1, 1); @@ -2315,19 +2316,19 @@ public class TestCapacityScheduler { assertEquals("max allocation in CS", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - 
cs.getMaximumResourceCapability().getMemory()); + cs.getMaximumResourceCapability().getMemorySize()); assertEquals("max allocation for A1", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - conf.getMaximumAllocationPerQueue(A1).getMemory()); + conf.getMaximumAllocationPerQueue(A1).getMemorySize()); assertEquals("max allocation", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - conf.getMaximumAllocation().getMemory()); + conf.getMaximumAllocation().getMemorySize()); CSQueue rootQueue = cs.getRootQueue(); CSQueue queueA = findQueue(rootQueue, A); CSQueue queueA1 = findQueue(queueA, A1); assertEquals("queue max allocation", ((LeafQueue) queueA1) - .getMaximumAllocation().getMemory(), 8192); + .getMaximumAllocation().getMemorySize(), 8192); setMaxAllocMb(conf, A1, 4096); @@ -2410,19 +2411,19 @@ public class TestCapacityScheduler { assertEquals("max capability MB in CS", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - cs.getMaximumResourceCapability().getMemory()); + cs.getMaximumResourceCapability().getMemorySize()); assertEquals("max capability vcores in CS", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, cs.getMaximumResourceCapability().getVirtualCores()); assertEquals("max allocation MB A1", 4096, - conf.getMaximumAllocationPerQueue(A1).getMemory()); + conf.getMaximumAllocationPerQueue(A1).getMemorySize()); assertEquals("max allocation vcores A1", 2, conf.getMaximumAllocationPerQueue(A1).getVirtualCores()); assertEquals("cluster max allocation MB", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - conf.getMaximumAllocation().getMemory()); + conf.getMaximumAllocation().getMemorySize()); assertEquals("cluster max allocation vcores", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, conf.getMaximumAllocation().getVirtualCores()); @@ -2431,7 +2432,7 @@ public class TestCapacityScheduler { CSQueue queueA = findQueue(rootQueue, A); CSQueue queueA1 = findQueue(queueA, A1); assertEquals("queue max allocation", ((LeafQueue) queueA1) - .getMaximumAllocation().getMemory(), 4096); + .getMaximumAllocation().getMemorySize(), 4096); setMaxAllocMb(conf, A1, 6144); setMaxAllocVcores(conf, A1, 3); @@ -2439,22 +2440,22 @@ public class TestCapacityScheduler { // conf will have changed but we shouldn't be able to change max allocation // for the actual queue assertEquals("max allocation MB A1", 6144, - conf.getMaximumAllocationPerQueue(A1).getMemory()); + conf.getMaximumAllocationPerQueue(A1).getMemorySize()); assertEquals("max allocation vcores A1", 3, conf.getMaximumAllocationPerQueue(A1).getVirtualCores()); assertEquals("max allocation MB cluster", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - conf.getMaximumAllocation().getMemory()); + conf.getMaximumAllocation().getMemorySize()); assertEquals("max allocation vcores cluster", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, conf.getMaximumAllocation().getVirtualCores()); assertEquals("queue max allocation MB", 6144, - ((LeafQueue) queueA1).getMaximumAllocation().getMemory()); + ((LeafQueue) queueA1).getMaximumAllocation().getMemorySize()); assertEquals("queue max allocation vcores", 3, ((LeafQueue) queueA1).getMaximumAllocation().getVirtualCores()); assertEquals("max capability MB cluster", YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, - cs.getMaximumResourceCapability().getMemory()); + cs.getMaximumResourceCapability().getMemorySize()); assertEquals("cluster max capability vcores", 
YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES, cs.getMaximumResourceCapability().getVirtualCores()); @@ -2479,7 +2480,7 @@ public class TestCapacityScheduler { checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY); assertEquals("max allocation MB in CS", 10240, - cs.getMaximumResourceCapability().getMemory()); + cs.getMaximumResourceCapability().getMemorySize()); assertEquals("max allocation vcores in CS", 10, cs.getMaximumResourceCapability().getVirtualCores()); @@ -2525,7 +2526,7 @@ public class TestCapacityScheduler { checkQueueCapacities(cs, A_CAPACITY, B_CAPACITY); assertEquals("max allocation MB in CS", 10240, - cs.getMaximumResourceCapability().getMemory()); + cs.getMaximumResourceCapability().getMemorySize()); assertEquals("max allocation vcores in CS", 10, cs.getMaximumResourceCapability().getVirtualCores()); @@ -2537,15 +2538,15 @@ public class TestCapacityScheduler { CSQueue queueB2 = findQueue(queueB, B2); assertEquals("queue A1 max allocation MB", 4096, - ((LeafQueue) queueA1).getMaximumAllocation().getMemory()); + ((LeafQueue) queueA1).getMaximumAllocation().getMemorySize()); assertEquals("queue A1 max allocation vcores", 4, ((LeafQueue) queueA1).getMaximumAllocation().getVirtualCores()); assertEquals("queue A2 max allocation MB", 10240, - ((LeafQueue) queueA2).getMaximumAllocation().getMemory()); + ((LeafQueue) queueA2).getMaximumAllocation().getMemorySize()); assertEquals("queue A2 max allocation vcores", 10, ((LeafQueue) queueA2).getMaximumAllocation().getVirtualCores()); assertEquals("queue B2 max allocation MB", 10240, - ((LeafQueue) queueB2).getMaximumAllocation().getMemory()); + ((LeafQueue) queueB2).getMaximumAllocation().getMemorySize()); assertEquals("queue B2 max allocation vcores", 10, ((LeafQueue) queueB2).getMaximumAllocation().getVirtualCores()); @@ -2555,19 +2556,19 @@ public class TestCapacityScheduler { // cluster level setting should change and any queues without // per queue setting assertEquals("max allocation MB in CS", 12288, - cs.getMaximumResourceCapability().getMemory()); + cs.getMaximumResourceCapability().getMemorySize()); assertEquals("max allocation vcores in CS", 12, cs.getMaximumResourceCapability().getVirtualCores()); assertEquals("queue A1 max MB allocation", 4096, - ((LeafQueue) queueA1).getMaximumAllocation().getMemory()); + ((LeafQueue) queueA1).getMaximumAllocation().getMemorySize()); assertEquals("queue A1 max vcores allocation", 4, ((LeafQueue) queueA1).getMaximumAllocation().getVirtualCores()); assertEquals("queue A2 max MB allocation", 12288, - ((LeafQueue) queueA2).getMaximumAllocation().getMemory()); + ((LeafQueue) queueA2).getMaximumAllocation().getMemorySize()); assertEquals("queue A2 max vcores allocation", 12, ((LeafQueue) queueA2).getMaximumAllocation().getVirtualCores()); assertEquals("queue B2 max MB allocation", 12288, - ((LeafQueue) queueB2).getMaximumAllocation().getMemory()); + ((LeafQueue) queueB2).getMaximumAllocation().getMemorySize()); assertEquals("queue B2 max vcores allocation", 12, ((LeafQueue) queueB2).getMaximumAllocation().getVirtualCores()); } @@ -2614,7 +2615,7 @@ public class TestCapacityScheduler { // Maximum resoure of b1 is 100 * 0.895 * 0.792 = 71 GB // 2 GBs used by am, so it's 71 - 2 = 69G. 
Assert.assertEquals(69 * GB, - am1.doHeartbeat().getAvailableResources().getMemory()); + am1.doHeartbeat().getAvailableResources().getMemorySize()); RMApp app2 = rm1.submitApp(1 * GB, "app", "user", null, "b2"); MockAM am2 = MockRM.launchAndRegisterAM(app2, rm1, nm1); @@ -2630,7 +2631,7 @@ public class TestCapacityScheduler { // B1 uses 3 GB (2 * 1GB containers and 1 AM container) // Available is 100 - 41 - 3 = 56 GB Assert.assertEquals(56 * GB, - am1.doHeartbeat().getAvailableResources().getMemory()); + am1.doHeartbeat().getAvailableResources().getMemorySize()); // Now we submit app3 to a1 (in higher level hierarchy), to see if headroom // of app1 (in queue b1) updated correctly @@ -2649,7 +2650,7 @@ public class TestCapacityScheduler { // A1 uses 25 GB (3 * 8GB containers and 1 AM container) // Available is 100 - 41 - 4 - 25 = 30 GB Assert.assertEquals(30 * GB, - am1.doHeartbeat().getAvailableResources().getMemory()); + am1.doHeartbeat().getAvailableResources().getMemorySize()); } @Test @@ -2857,7 +2858,7 @@ public class TestCapacityScheduler { memory, queue.getQueueResourceUsage() .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label) - .getMemory()); + .getMemorySize()); } private void checkPendingResourceGreaterThanZero(MockRM rm, String queueName, @@ -2866,7 +2867,7 @@ public class TestCapacityScheduler { CSQueue queue = cs.getQueue(queueName); Assert.assertTrue(queue.getQueueResourceUsage() .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label) - .getMemory() > 0); + .getMemorySize() > 0); } // Test verifies AM Used resource for LeafQueue when AM ResourceRequest is @@ -3066,7 +3067,7 @@ public class TestCapacityScheduler { u0Priority, recordFactory))); cs.handle(new NodeUpdateSchedulerEvent(node)); cs.handle(new NodeUpdateSchedulerEvent(node2)); - assertEquals(6*GB, fiCaApp1.getHeadroom().getMemory()); + assertEquals(6*GB, fiCaApp1.getHeadroom().getMemorySize()); assertEquals(15, fiCaApp1.getHeadroom().getVirtualCores()); // allocate container for app2 with 1GB memory and 1 vcore @@ -3075,7 +3076,7 @@ public class TestCapacityScheduler { u0Priority, recordFactory))); cs.handle(new NodeUpdateSchedulerEvent(node)); cs.handle(new NodeUpdateSchedulerEvent(node2)); - assertEquals(9*GB, fiCaApp2.getHeadroom().getMemory()); + assertEquals(9*GB, fiCaApp2.getHeadroom().getMemorySize()); assertEquals(15, fiCaApp2.getHeadroom().getVirtualCores()); } @@ -3180,7 +3181,7 @@ public class TestCapacityScheduler { FiCaSchedulerApp app = getFiCaSchedulerApp(rm, app1.getApplicationId()); Assert.assertEquals(2 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); checkPendingResource(rm, "a1", 2 * GB, null); checkPendingResource(rm, "a", 2 * GB, null); checkPendingResource(rm, "root", 2 * GB, null); @@ -3194,7 +3195,7 @@ public class TestCapacityScheduler { null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); checkPendingResource(rm, "a1", 6 * GB, null); checkPendingResource(rm, "a", 6 * GB, null); checkPendingResource(rm, "root", 6 * GB, null); @@ -3210,7 +3211,7 @@ public class TestCapacityScheduler { .newInstance(containerId3, Resources.createResource(2 * GB))), null); Assert.assertEquals(4 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); checkPendingResource(rm, "a1", 4 * GB, null); checkPendingResource(rm, 
"a", 4 * GB, null); checkPendingResource(rm, "root", 4 * GB, null); @@ -3228,10 +3229,10 @@ public class TestCapacityScheduler { Resource amResourceLimit = queueA.getAMResourceLimit(); Resource amResource1 = - Resource.newInstance(amResourceLimit.getMemory() + 1024, + Resource.newInstance(amResourceLimit.getMemorySize() + 1024, amResourceLimit.getVirtualCores() + 1); Resource amResource2 = - Resource.newInstance(amResourceLimit.getMemory() + 2048, + Resource.newInstance(amResourceLimit.getMemorySize() + 2048, amResourceLimit.getVirtualCores() + 1); rm.submitApp(amResource1, "app-1", userName, null, queueName); @@ -3351,23 +3352,23 @@ public class TestCapacityScheduler { application_0.schedule(); // Check the used resource is 1 GB 1 core - Assert.assertEquals(1 * GB, nm_0.getUsed().getMemory()); + Assert.assertEquals(1 * GB, nm_0.getUsed().getMemorySize()); Resource usedResource = resourceManager.getResourceScheduler() .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource(); - Assert.assertEquals(usedResource.getMemory(), 1 * GB); + Assert.assertEquals(usedResource.getMemorySize(), 1 * GB); Assert.assertEquals(usedResource.getVirtualCores(), 1); // Check total resource of scheduler node is also changed to 1 GB 1 core Resource totalResource = resourceManager.getResourceScheduler() .getSchedulerNode(nm_0.getNodeId()).getTotalResource(); - Assert.assertEquals(totalResource.getMemory(), 1 * GB); + Assert.assertEquals(totalResource.getMemorySize(), 1 * GB); Assert.assertEquals(totalResource.getVirtualCores(), 1); // Check the available resource is 0/0 Resource availableResource = resourceManager.getResourceScheduler() .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource(); - Assert.assertEquals(availableResource.getMemory(), 0); + Assert.assertEquals(availableResource.getMemorySize(), 0); Assert.assertEquals(availableResource.getVirtualCores(), 0); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java index b649fc97f2d..e7157b86540 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerLazyPreemption.java @@ -19,27 +19,18 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity; import com.google.common.collect.Sets; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.service.Service; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.ResourceRequest; -import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; -import 
org.apache.hadoop.yarn.server.resourcemanager.ResourceManager.RMActiveServices; import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingEditPolicy; -import org.apache.hadoop.yarn.server.resourcemanager.monitor.SchedulingMonitor; import org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity.ProportionalCapacityPreemptionPolicy; -import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.preemption.PreemptionManager; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeUpdateSchedulerEvent; @@ -56,7 +47,6 @@ import java.util.Map; import java.util.Set; import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; public class TestCapacitySchedulerLazyPreemption extends CapacitySchedulerPreemptionTestBase { @@ -126,9 +116,9 @@ public class TestCapacitySchedulerLazyPreemption // NM1/NM2 has available resource = 0G Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); // AM asks for a 1 * GB container am2.allocate(Arrays.asList(ResourceRequest @@ -205,9 +195,9 @@ public class TestCapacitySchedulerLazyPreemption // NM1/NM2 has available resource = 0G Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); // AM asks for a 1 * GB container with unknown host and unknown rack am2.allocate(Arrays.asList(ResourceRequest @@ -296,9 +286,9 @@ public class TestCapacitySchedulerLazyPreemption // NM1/NM2 has available resource = 0G Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); // AM asks for a 1 * GB container for h3 with hard locality, // h3 doesn't exist in the cluster @@ -393,7 +383,7 @@ public class TestCapacitySchedulerLazyPreemption // NM1 has available resource = 0G Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); am2.allocate("*", 1 * GB, 1, new ArrayList()); // Get edit policy and do one update @@ -493,7 +483,7 @@ public class TestCapacitySchedulerLazyPreemption // NM1 has available resource = 0G Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); am2.allocate("*", 3 * GB, 1, new ArrayList()); // Get edit policy and do one update @@ -582,9 +572,9 @@ public class 
TestCapacitySchedulerLazyPreemption // NM1/NM2 has available resource = 0G Assert.assertEquals(0 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertEquals(0 * GB, cs.getNode(nm2.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); // AM asks for a 1 * GB container am2.allocate(Arrays.asList(ResourceRequest diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java index ca78e253283..9aef77c8d69 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerNodeLabelUpdate.java @@ -111,7 +111,7 @@ public class TestCapacitySchedulerNodeLabelUpdate { CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler(); CSQueue queue = scheduler.getQueue(queueName); Assert.assertEquals(memory, queue.getQueueResourceUsage().getUsed(label) - .getMemory()); + .getMemorySize()); } private void checkUsedCapacity(MockRM rm, String queueName, int capacity, @@ -128,7 +128,7 @@ public class TestCapacitySchedulerNodeLabelUpdate { CapacityScheduler scheduler = (CapacityScheduler) rm.getResourceScheduler(); CSQueue queue = scheduler.getQueue(queueName); Assert.assertEquals(memory, queue.getQueueResourceUsage().getAMUsed(label) - .getMemory()); + .getMemorySize()); } private void checkUserUsedResource(MockRM rm, String queueName, @@ -137,7 +137,7 @@ public class TestCapacitySchedulerNodeLabelUpdate { LeafQueue queue = (LeafQueue) scheduler.getQueue(queueName); LeafQueue.User user = queue.getUser(userName); Assert.assertEquals(memory, - user.getResourceUsage().getUsed(partition).getMemory()); + user.getResourceUsage().getUsed(partition).getMemorySize()); } @Test(timeout = 60000) @@ -175,7 +175,7 @@ public class TestCapacitySchedulerNodeLabelUpdate { rm.getResourceScheduler().getAppResourceUsageReport( am1.getApplicationAttemptId()); Assert.assertEquals(1024, appResourceUsageReport.getUsedResources() - .getMemory()); + .getMemorySize()); Assert.assertEquals(1, appResourceUsageReport.getUsedResources() .getVirtualCores()); // request a container. 
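
Editorial note (not part of the patch): the hunks in these test classes all follow one mechanical pattern, swapping the deprecated int-valued Resource.getMemory() for the long-valued Resource.getMemorySize() inside JUnit assertions. As a hedged illustration only, with a made-up test class, constant, and values that do not appear in the patch, the target assertion style looks roughly like this:

import org.apache.hadoop.yarn.api.records.Resource;
import org.junit.Assert;
import org.junit.Test;

public class ResourceMemoryAssertionSketch {
  // Memory is tracked in MB in these tests; GB here is the same 1024 MB shorthand.
  private static final int GB = 1024;

  @Test
  public void usedMemoryIsComparedAsLong() {
    // Hypothetical resource value, 2 GB and 2 vcores.
    Resource used = Resource.newInstance(2 * GB, 2);
    // getMemorySize() returns long, so the int expected value simply widens;
    // no cast is needed inside the assertion itself.
    Assert.assertEquals(2 * GB, used.getMemorySize());
    // A narrowing cast is only required where an int is genuinely needed.
    int memoryMb = (int) used.getMemorySize();
    Assert.assertEquals(2048, memoryMb);
  }
}
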
@@ -186,7 +186,7 @@ public class TestCapacitySchedulerNodeLabelUpdate { rm.getResourceScheduler().getAppResourceUsageReport( am1.getApplicationAttemptId()); Assert.assertEquals(2048, appResourceUsageReport.getUsedResources() - .getMemory()); + .getMemorySize()); Assert.assertEquals(2, appResourceUsageReport.getUsedResources() .getVirtualCores()); LeafQueue queue = @@ -196,7 +196,7 @@ public class TestCapacitySchedulerNodeLabelUpdate { for (UserInfo userInfo : users) { if (userInfo.getUsername().equals("user")) { ResourceInfo resourcesUsed = userInfo.getResourcesUsed(); - Assert.assertEquals(2048, resourcesUsed.getMemory()); + Assert.assertEquals(2048, resourcesUsed.getMemorySize()); Assert.assertEquals(2, resourcesUsed.getvCores()); } } @@ -271,9 +271,9 @@ public class TestCapacitySchedulerNodeLabelUpdate { checkUserUsedResource(rm, "a", "user", "x", 0); checkUserUsedResource(rm, "a", "user", "z", 1024); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getUsed("x").getMemory()); + app.getAppAttemptResourceUsage().getUsed("x").getMemorySize()); Assert.assertEquals(1024, - app.getAppAttemptResourceUsage().getUsed("z").getMemory()); + app.getAppAttemptResourceUsage().getUsed("z").getMemorySize()); // change h1's label to y mgr.replaceLabelsOnNode(ImmutableMap.of(nm1.getNodeId(), toSet("y"))); @@ -296,11 +296,11 @@ public class TestCapacitySchedulerNodeLabelUpdate { checkUserUsedResource(rm, "a", "user", "y", 1024); checkUserUsedResource(rm, "a", "user", "z", 0); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getUsed("x").getMemory()); + app.getAppAttemptResourceUsage().getUsed("x").getMemorySize()); Assert.assertEquals(1024, - app.getAppAttemptResourceUsage().getUsed("y").getMemory()); + app.getAppAttemptResourceUsage().getUsed("y").getMemorySize()); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getUsed("z").getMemory()); + app.getAppAttemptResourceUsage().getUsed("z").getMemorySize()); // change h1's label to no label Set emptyLabels = new HashSet<>(); @@ -326,13 +326,13 @@ public class TestCapacitySchedulerNodeLabelUpdate { checkUserUsedResource(rm, "a", "user", "z", 0); checkUserUsedResource(rm, "a", "user", "", 2048); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getUsed("x").getMemory()); + app.getAppAttemptResourceUsage().getUsed("x").getMemorySize()); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getUsed("y").getMemory()); + app.getAppAttemptResourceUsage().getUsed("y").getMemorySize()); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getUsed("z").getMemory()); + app.getAppAttemptResourceUsage().getUsed("z").getMemorySize()); Assert.assertEquals(2048, - app.getAppAttemptResourceUsage().getUsed("").getMemory()); + app.getAppAttemptResourceUsage().getUsed("").getMemorySize()); // Finish the two containers, we should see used resource becomes 0 cs.completedContainer(cs.getRMContainer(containerId2), @@ -460,17 +460,17 @@ public class TestCapacitySchedulerNodeLabelUpdate { checkUserUsedResource(rm, "a", "u2", "z", 2 * GB); checkUserUsedResource(rm, "a", "u2", "", 1 * GB); Assert.assertEquals(0, - application1.getAppAttemptResourceUsage().getUsed("x").getMemory()); + application1.getAppAttemptResourceUsage().getUsed("x").getMemorySize()); Assert.assertEquals(1 * GB, - application1.getAppAttemptResourceUsage().getUsed("z").getMemory()); + application1.getAppAttemptResourceUsage().getUsed("z").getMemorySize()); Assert.assertEquals(2 * GB, - application1.getAppAttemptResourceUsage().getUsed("").getMemory()); + 
application1.getAppAttemptResourceUsage().getUsed("").getMemorySize()); Assert.assertEquals(0, - application2.getAppAttemptResourceUsage().getUsed("x").getMemory()); + application2.getAppAttemptResourceUsage().getUsed("x").getMemorySize()); Assert.assertEquals(2 * GB, - application2.getAppAttemptResourceUsage().getUsed("z").getMemory()); + application2.getAppAttemptResourceUsage().getUsed("z").getMemorySize()); Assert.assertEquals(1 * GB, - application2.getAppAttemptResourceUsage().getUsed("").getMemory()); + application2.getAppAttemptResourceUsage().getUsed("").getMemorySize()); rm.close(); } @@ -536,9 +536,9 @@ public class TestCapacitySchedulerNodeLabelUpdate { checkUserUsedResource(rm, "a", "user", "x", 0); checkUserUsedResource(rm, "a", "user", "z", 2048); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getAMUsed("x").getMemory()); + app.getAppAttemptResourceUsage().getAMUsed("x").getMemorySize()); Assert.assertEquals(1024, - app.getAppAttemptResourceUsage().getAMUsed("z").getMemory()); + app.getAppAttemptResourceUsage().getAMUsed("z").getMemorySize()); // change h1's label to no label Set emptyLabels = new HashSet<>(); @@ -555,11 +555,11 @@ public class TestCapacitySchedulerNodeLabelUpdate { checkUserUsedResource(rm, "a", "user", "z", 0); checkUserUsedResource(rm, "a", "user", "", 2048); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getAMUsed("x").getMemory()); + app.getAppAttemptResourceUsage().getAMUsed("x").getMemorySize()); Assert.assertEquals(0, - app.getAppAttemptResourceUsage().getAMUsed("z").getMemory()); + app.getAppAttemptResourceUsage().getAMUsed("z").getMemorySize()); Assert.assertEquals(1024, - app.getAppAttemptResourceUsage().getAMUsed("").getMemory()); + app.getAppAttemptResourceUsage().getAMUsed("").getMemorySize()); rm.close(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java index 69444416c16..db6115c4ab6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacitySchedulerSurgicalPreemption.java @@ -120,9 +120,9 @@ public class TestCapacitySchedulerSurgicalPreemption // NM1/NM2 has available resource = 2G/4G Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertEquals(4 * GB, cs.getNode(nm2.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); // AM asks for a 1 * GB container am2.allocate(Arrays.asList(ResourceRequest diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java index 1612201db61..7f4fc2cda59 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestChildQueueOrder.java @@ -166,14 +166,14 @@ public class TestChildQueueOrder { private float computeQueueAbsoluteUsedCapacity(CSQueue queue, int expectedMemory, Resource clusterResource) { return ( - ((float)expectedMemory / (float)clusterResource.getMemory()) + ((float)expectedMemory / (float)clusterResource.getMemorySize()) ); } private float computeQueueUsedCapacity(CSQueue queue, int expectedMemory, Resource clusterResource) { return (expectedMemory / - (clusterResource.getMemory() * queue.getAbsoluteCapacity())); + (clusterResource.getMemorySize() * queue.getAbsoluteCapacity())); } final static float DELTA = 0.0001f; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java index f94c963ec4d..6cf9c618525 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerAllocation.java @@ -28,7 +28,6 @@ import org.apache.hadoop.security.SecurityUtilTestHelper; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LogAggregationContext; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; @@ -38,8 +37,6 @@ import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.security.ContainerTokenIdentifier; import org.apache.hadoop.yarn.server.api.ContainerType; -import org.apache.hadoop.yarn.server.api.records.NodeHealthStatus; -import org.apache.hadoop.yarn.server.api.records.NodeStatus; import org.apache.hadoop.yarn.server.resourcemanager.MockAM; import org.apache.hadoop.yarn.server.resourcemanager.MockNM; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; @@ -53,10 +50,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptS import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEvent; -import 
org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeEventType; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeStatusEvent; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeRemovedSchedulerEvent; @@ -397,15 +390,15 @@ public class TestContainerAllocation { // NM1 has available resource = 2G (8G - 2 * 1G - 4G) Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Usage of queue = 4G + 2 * 1G + 4G (reserved) Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed().getMemory()); + .getUsed().getMemorySize()); Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage() - .getReserved().getMemory()); + .getReserved().getMemorySize()); Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved() - .getMemory()); + .getMemorySize()); // Cancel asks of app2 and re-kick RM am2.allocate("*", 4 * GB, 0, new ArrayList()); @@ -414,14 +407,14 @@ public class TestContainerAllocation { // App2's reservation will be cancelled Assert.assertTrue(schedulerApp2.getReservedContainers().size() == 0); Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); Assert.assertEquals(6 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed().getMemory()); + .getUsed().getMemorySize()); Assert.assertEquals(0, cs.getRootQueue().getQueueResourceUsage() - .getReserved().getMemory()); + .getReserved().getMemorySize()); Assert.assertEquals(0, leafQueue.getQueueResourceUsage().getReserved() - .getMemory()); + .getMemorySize()); rm1.close(); } @@ -480,15 +473,15 @@ public class TestContainerAllocation { // NM1 has available resource = 2G (8G - 2 * 1G - 4G) Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Usage of queue = 4G + 2 * 1G + 4G (reserved) Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed().getMemory()); + .getUsed().getMemorySize()); Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage() - .getReserved().getMemory()); + .getReserved().getMemorySize()); Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved() - .getMemory()); + .getMemorySize()); // Mark one app1 container as killed/completed and re-kick RM for (RMContainer container : schedulerApp1.getLiveContainers()) { @@ -509,15 +502,15 @@ public class TestContainerAllocation { // NM1 has available resource = 2G (8G - 2 * 1G - 4G) Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Usage of queue = 4G + 2 * 1G Assert.assertEquals(6 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed().getMemory()); + .getUsed().getMemorySize()); Assert.assertEquals(0 * GB, 
cs.getRootQueue().getQueueResourceUsage() - .getReserved().getMemory()); + .getReserved().getMemorySize()); Assert.assertEquals(0 * GB, leafQueue.getQueueResourceUsage().getReserved() - .getMemory()); + .getMemorySize()); rm1.close(); } @@ -576,15 +569,15 @@ public class TestContainerAllocation { // NM1 has available resource = 2G (8G - 2 * 1G - 4G) Assert.assertEquals(2 * GB, cs.getNode(nm1.getNodeId()) - .getUnallocatedResource().getMemory()); + .getUnallocatedResource().getMemorySize()); Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Usage of queue = 4G + 2 * 1G + 4G (reserved) Assert.assertEquals(10 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed().getMemory()); + .getUsed().getMemorySize()); Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage() - .getReserved().getMemory()); + .getReserved().getMemorySize()); Assert.assertEquals(4 * GB, leafQueue.getQueueResourceUsage().getReserved() - .getMemory()); + .getMemorySize()); // Remove the node cs.handle(new NodeRemovedSchedulerEvent(rmNode1)); @@ -596,11 +589,11 @@ public class TestContainerAllocation { // Usage and Reserved capacity of queue is 0 Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed().getMemory()); + .getUsed().getMemorySize()); Assert.assertEquals(0 * GB, cs.getRootQueue().getQueueResourceUsage() - .getReserved().getMemory()); + .getReserved().getMemorySize()); Assert.assertEquals(0 * GB, leafQueue.getQueueResourceUsage().getReserved() - .getMemory()); + .getMemorySize()); rm1.close(); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java index 5edc36ae56c..499e041a14f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestContainerResizing.java @@ -147,7 +147,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 2 * GB, null); Assert.assertEquals(2 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // NM1 do 1 heartbeats CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -157,7 +157,7 @@ public class TestContainerResizing { // Pending resource should be deducted checkPendingResource(rm1, "default", 0 * GB, null); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); verifyContainerIncreased(am1.allocate(null, null), containerId1, 3 * GB); verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 17 * GB); @@ -188,7 +188,7 @@ public class TestContainerResizing { checkUsedResource(rm1, "default", 3 * GB, null); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); ContainerId containerId1 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); @@ 
-202,7 +202,7 @@ public class TestContainerResizing { verifyContainerDecreased(response, containerId1, 1 * GB); checkUsedResource(rm1, "default", 1 * GB, null); Assert.assertEquals(1 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); // Check if decreased containers added to RMNode RMNodeImpl rmNode = @@ -272,7 +272,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // NM1 do 1 heartbeats CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -284,21 +284,21 @@ public class TestContainerResizing { /* Check reservation statuses */ // Increase request should be reserved Assert.assertTrue(rmContainer1.hasIncreaseReservation()); - Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemory()); + Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemorySize()); Assert.assertFalse(app.getReservedContainers().isEmpty()); Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Pending resource will not be changed since it's not satisfied checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 9 * GB, null); Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); // Complete one container and do another allocation am1.allocate(null, Arrays.asList(containerId2)); @@ -315,15 +315,15 @@ public class TestContainerResizing { // Pending resource will be changed since it's satisfied checkPendingResource(rm1, "default", 0 * GB, null); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 7 * GB, null); Assert.assertEquals(7 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); Assert.assertEquals(7 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 1 * GB); rm1.close(); @@ -373,7 +373,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // NM1 do 1 heartbeats CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -390,15 +390,15 @@ public class TestContainerResizing { // 
Pending resource will not be changed since it's not satisfied checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will *NOT* be updated checkUsedResource(rm1, "default", 3 * GB, null); Assert.assertEquals(3 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); rm1.close(); } @@ -453,7 +453,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // NM1 do 1 heartbeats CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -465,21 +465,21 @@ public class TestContainerResizing { /* Check reservation statuses */ // Increase request should be reserved Assert.assertTrue(rmContainer1.hasIncreaseReservation()); - Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemory()); + Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemorySize()); Assert.assertFalse(app.getReservedContainers().isEmpty()); Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Pending resource will not be changed since it's not satisfied checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 9 * GB, null); Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); // Complete one container and cancel increase request (via send a increase // request, make target_capacity=existing_capacity) @@ -501,15 +501,15 @@ public class TestContainerResizing { // Pending resource will be changed since it's satisfied checkPendingResource(rm1, "default", 0 * GB, null); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 1 * GB, null); Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); Assert.assertEquals(1 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); 
rm1.close(); } @@ -565,7 +565,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // NM1 do 1 heartbeats CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -577,21 +577,21 @@ public class TestContainerResizing { /* Check reservation statuses */ // Increase request should be reserved Assert.assertTrue(rmContainer1.hasIncreaseReservation()); - Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemory()); + Assert.assertEquals(6 * GB, rmContainer1.getReservedResource().getMemorySize()); Assert.assertFalse(app.getReservedContainers().isEmpty()); Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Pending resource will not be changed since it's not satisfied checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 10 * GB, null); Assert.assertEquals(10 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(4 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); // Complete one container and cancel increase request (via send a increase // request, make target_capacity=existing_capacity) @@ -611,15 +611,15 @@ public class TestContainerResizing { // Pending resource will be changed since it's satisfied checkPendingResource(rm1, "default", 0 * GB, null); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 1 * GB, null); Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); Assert.assertEquals(1 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); rm1.close(); } @@ -674,7 +674,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // NM1 do 1 heartbeats CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -686,21 +686,21 @@ public class TestContainerResizing { /* Check reservation statuses */ // Increase request should be reserved Assert.assertTrue(rmContainer2.hasIncreaseReservation()); - Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemory()); + Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemorySize()); Assert.assertFalse(app.getReservedContainers().isEmpty()); 
Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Pending resource will not be changed since it's not satisfied checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 9 * GB, null); Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); // Complete container2, container will be unreserved and completed am1.allocate(null, Arrays.asList(containerId2)); @@ -713,15 +713,15 @@ public class TestContainerResizing { // Pending resource will be changed since it's satisfied checkPendingResource(rm1, "default", 0 * GB, null); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 1 * GB, null); Assert.assertEquals(1 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); Assert.assertEquals(1 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); rm1.close(); } @@ -771,7 +771,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // NM1 do 1 heartbeats CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -783,21 +783,21 @@ public class TestContainerResizing { /* Check reservation statuses */ // Increase request should be reserved Assert.assertTrue(rmContainer2.hasIncreaseReservation()); - Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemory()); + Assert.assertEquals(6 * GB, rmContainer2.getReservedResource().getMemorySize()); Assert.assertFalse(app.getReservedContainers().isEmpty()); Assert.assertNotNull(cs.getNode(nm1.getNodeId()).getReservedContainer()); // Pending resource will not be changed since it's not satisfied checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 9 * GB, null); Assert.assertEquals(9 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + 
app.getAppAttemptResourceUsage().getReserved().getMemorySize()); // Kill the application cs.handle(new AppAttemptRemovedSchedulerEvent(am1.getApplicationAttemptId(), @@ -811,15 +811,15 @@ public class TestContainerResizing { // Pending resource will be changed since it's satisfied checkPendingResource(rm1, "default", 0 * GB, null); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 0 * GB, null); Assert.assertEquals(0 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); rm1.close(); } @@ -894,7 +894,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Get rmNode1 CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -916,15 +916,15 @@ public class TestContainerResizing { // There're still 3 pending increase requests checkPendingResource(rm1, "default", 3 * GB, null); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 10 * GB, null); Assert.assertEquals(10 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); Assert.assertEquals(10 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); rm1.close(); } @@ -975,7 +975,7 @@ public class TestContainerResizing { checkPendingResource(rm1, "default", 6 * GB, null); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Get rmNode1 CapacityScheduler cs = (CapacityScheduler) rm1.getResourceScheduler(); @@ -997,15 +997,15 @@ public class TestContainerResizing { // There're still 3 pending increase requests checkPendingResource(rm1, "default", 3 * GB, null); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getPending().getMemory()); + app.getAppAttemptResourceUsage().getPending().getMemorySize()); // Queue/user/application's usage will be updated checkUsedResource(rm1, "default", 10 * GB, null); Assert.assertEquals(10 * GB, ((LeafQueue) cs.getQueue("default")) - .getUser("user").getUsed().getMemory()); + .getUser("user").getUsed().getMemorySize()); Assert.assertEquals(0 * GB, - app.getAppAttemptResourceUsage().getReserved().getMemory()); + app.getAppAttemptResourceUsage().getReserved().getMemorySize()); Assert.assertEquals(10 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); rm1.close(); } 
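
Editorial note (not part of the patch): the only non-mechanical part of this migration is the return-type change from int to long, so call sites that still need an int (for example the AM-resource-limit computation near the top of this section) take an explicit narrowing cast. A minimal sketch, assuming a hypothetical helper name and made-up values:

import org.apache.hadoop.yarn.api.records.Resource;

public class MemorySizeNarrowingSketch {
  /** Returns half of the given limit in MB as an int, assuming it fits in an int. */
  static int halfLimitMb(Resource amResourceLimit) {
    long halfMb = amResourceLimit.getMemorySize() / 2;
    // Explicit narrowing mirrors the "(int) (...getMemorySize() / 2)" call sites
    // in this patch; limits above Integer.MAX_VALUE MB would overflow here.
    return (int) halfMb;
  }

  public static void main(String[] args) {
    // Hypothetical limit: 8 GB of memory, 8 vcores.
    Resource limit = Resource.newInstance(8192, 8);
    System.out.println(halfLimitMb(limit)); // prints 4096
  }
}
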
@@ -1033,7 +1033,7 @@ public class TestContainerResizing { FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp( rm, app1.getApplicationId()); Assert.assertEquals(3 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); // making sure container is launched ContainerId containerId1 = ContainerId.newContainerId(am1.getApplicationAttemptId(), 1); @@ -1062,7 +1062,7 @@ public class TestContainerResizing { Assert.assertEquals(memory, queue.getQueueResourceUsage() .getPending(label == null ? RMNodeLabelsManager.NO_LABEL : label) - .getMemory()); + .getMemorySize()); } private void checkUsedResource(MockRM rm, String queueName, int memory, @@ -1072,7 +1072,7 @@ public class TestContainerResizing { Assert.assertEquals(memory, queue.getQueueResourceUsage() .getUsed(label == null ? RMNodeLabelsManager.NO_LABEL : label) - .getMemory()); + .getMemorySize()); } private void verifyContainerIncreased(AllocateResponse response, @@ -1082,7 +1082,7 @@ public class TestContainerResizing { for (Container c : increasedContainers) { if (c.getId().equals(containerId)) { found = true; - Assert.assertEquals(mem, c.getResource().getMemory()); + Assert.assertEquals(mem, c.getResource().getMemorySize()); } } if (!found) { @@ -1097,7 +1097,7 @@ public class TestContainerResizing { for (Container c : decreasedContainers) { if (c.getId().equals(containerId)) { found = true; - Assert.assertEquals(mem, c.getResource().getMemory()); + Assert.assertEquals(mem, c.getResource().getMemorySize()); } } if (!found) { @@ -1121,6 +1121,6 @@ public class TestContainerResizing { CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); SchedulerNode node = cs.getNode(nodeId); Assert - .assertEquals(expectedMemory, node.getUnallocatedResource().getMemory()); + .assertEquals(expectedMemory, node.getUnallocatedResource().getMemorySize()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java index 645086d2d3f..d388172130a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestIncreaseAllocationExpirer.java @@ -98,7 +98,7 @@ public class TestIncreaseAllocationExpirer { FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp( rm1, app1.getApplicationId()); Assert.assertEquals(2 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB); // Report container status nm1.nodeHeartbeat( @@ -129,11 +129,11 @@ public class TestIncreaseAllocationExpirer { // Verify container size is 3G Assert.assertEquals( 3 * GB, rm1.getResourceScheduler().getRMContainer(containerId2) - .getAllocatedResource().getMemory()); + .getAllocatedResource().getMemorySize()); // Verify total resource usage checkUsedResource(rm1, "default", 4 * GB, null); 
Assert.assertEquals(4 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); // Verify available resource verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB); rm1.stop(); @@ -172,7 +172,7 @@ public class TestIncreaseAllocationExpirer { FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp( rm1, app1.getApplicationId()); Assert.assertEquals(2 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB); nm1.nodeHeartbeat( app1.getCurrentAppAttempt() @@ -190,7 +190,7 @@ public class TestIncreaseAllocationExpirer { // Verify resource usage checkUsedResource(rm1, "default", 4 * GB, null); Assert.assertEquals(4 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB); // Wait long enough for the increase token to expire, and for the roll // back action to complete @@ -198,11 +198,11 @@ public class TestIncreaseAllocationExpirer { // Verify container size is 1G Assert.assertEquals( 1 * GB, rm1.getResourceScheduler().getRMContainer(containerId2) - .getAllocatedResource().getMemory()); + .getAllocatedResource().getMemorySize()); // Verify total resource usage is 2G checkUsedResource(rm1, "default", 2 * GB, null); Assert.assertEquals(2 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); // Verify available resource is rolled back to 18GB verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 18 * GB); rm1.stop(); @@ -276,7 +276,7 @@ public class TestIncreaseAllocationExpirer { FiCaSchedulerApp app = TestUtils.getFiCaSchedulerApp( rm1, app1.getApplicationId()); Assert.assertEquals(6 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); // Verify available resource is now reduced to 14GB verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 14 * GB); // Use the first token (3G) @@ -287,11 +287,11 @@ public class TestIncreaseAllocationExpirer { // Verify container size is rolled back to 3G Assert.assertEquals( 3 * GB, rm1.getResourceScheduler().getRMContainer(containerId2) - .getAllocatedResource().getMemory()); + .getAllocatedResource().getMemorySize()); // Verify total resource usage is 4G checkUsedResource(rm1, "default", 4 * GB, null); Assert.assertEquals(4 * GB, - app.getAppAttemptResourceUsage().getUsed().getMemory()); + app.getAppAttemptResourceUsage().getUsed().getMemorySize()); // Verify available resource is rolled back to 14GB verifyAvailableResourceOfSchedulerNode(rm1, nm1.getNodeId(), 16 * GB); // Verify NM receives the decrease message (3G) @@ -299,7 +299,7 @@ public class TestIncreaseAllocationExpirer { nm1.nodeHeartbeat(true).getContainersToDecrease(); Assert.assertEquals(1, containersToDecrease.size()); Assert.assertEquals( - 3 * GB, containersToDecrease.get(0).getResource().getMemory()); + 3 * GB, containersToDecrease.get(0).getResource().getMemorySize()); rm1.stop(); } @@ -394,13 +394,13 @@ public class TestIncreaseAllocationExpirer { Thread.sleep(10000); Assert.assertEquals( 2 * GB, rm1.getResourceScheduler().getRMContainer(containerId2) - .getAllocatedResource().getMemory()); + .getAllocatedResource().getMemorySize()); Assert.assertEquals( 3 * GB, 
rm1.getResourceScheduler().getRMContainer(containerId3) - .getAllocatedResource().getMemory()); + .getAllocatedResource().getMemorySize()); Assert.assertEquals( 4 * GB, rm1.getResourceScheduler().getRMContainer(containerId4) - .getAllocatedResource().getMemory()); + .getAllocatedResource().getMemorySize()); // Verify NM receives 2 decrease message List containersToDecrease = nm1.nodeHeartbeat(true).getContainersToDecrease(); @@ -408,9 +408,9 @@ public class TestIncreaseAllocationExpirer { // Sort the list to make sure containerId3 is the first Collections.sort(containersToDecrease); Assert.assertEquals( - 3 * GB, containersToDecrease.get(0).getResource().getMemory()); + 3 * GB, containersToDecrease.get(0).getResource().getMemorySize()); Assert.assertEquals( - 4 * GB, containersToDecrease.get(1).getResource().getMemory()); + 4 * GB, containersToDecrease.get(1).getResource().getMemorySize()); rm1.stop(); } @@ -421,7 +421,7 @@ public class TestIncreaseAllocationExpirer { Assert.assertEquals(memory, queue.getQueueResourceUsage() .getUsed(label == null ? RMNodeLabelsManager.NO_LABEL : label) - .getMemory()); + .getMemorySize()); } private void verifyAvailableResourceOfSchedulerNode(MockRM rm, NodeId nodeId, @@ -429,7 +429,7 @@ public class TestIncreaseAllocationExpirer { CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); SchedulerNode node = cs.getNode(nodeId); Assert - .assertEquals(expectedMemory, node.getUnallocatedResource().getMemory()); + .assertEquals(expectedMemory, node.getUnallocatedResource().getMemorySize()); } private Container getContainer( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java index 263b95bdd1b..48e6f0e1272 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestLeafQueue.java @@ -326,7 +326,7 @@ public class TestLeafQueue { a.assignContainers(clusterResource, node_0, new ResourceLimits( clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); assertEquals( - (int)(node_0.getTotalResource().getMemory() * a.getCapacity()) - (1*GB), + (int)(node_0.getTotalResource().getMemorySize() * a.getCapacity()) - (1*GB), a.getMetrics().getAvailableMB()); } @@ -408,7 +408,7 @@ public class TestLeafQueue { assertEquals(1, a.getMetrics().getAppsSubmitted()); assertEquals(1, a.getMetrics().getAppsPending()); assertEquals(1, a.getUser(user_0).getActiveApplications()); - assertEquals(app_1.getAMResource().getMemory(), a.getMetrics() + assertEquals(app_1.getAMResource().getMemorySize(), a.getMetrics() .getUsedAMResourceMB()); assertEquals(app_1.getAMResource().getVirtualCores(), a.getMetrics() .getUsedAMResourceVCores()); @@ -516,9 +516,9 @@ public class TestLeafQueue { // Only 1 container a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(1*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, 
app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(1*GB, a.getMetrics().getAllocatedMB()); assertEquals(0*GB, a.getMetrics().getAvailableMB()); @@ -527,18 +527,18 @@ public class TestLeafQueue { // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(2*GB, a.getMetrics().getAllocatedMB()); // Can't allocate 3rd due to user-limit a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(2*GB, a.getMetrics().getAllocatedMB()); @@ -546,18 +546,18 @@ public class TestLeafQueue { a.setUserLimitFactor(10); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(3*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(3*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(3*GB, a.getMetrics().getAllocatedMB()); // One more should work, for app_1, due to user-limit-factor a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(4*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(4*GB, a.getMetrics().getAllocatedMB()); @@ -566,9 +566,9 @@ public class TestLeafQueue { a.setMaxCapacity(0.5f); a.assignContainers(clusterResource, node_0, new ResourceLimits( clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(4*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - 
assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(4*GB, a.getMetrics().getAllocatedMB()); @@ -580,9 +580,9 @@ public class TestLeafQueue { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); } - assertEquals(1*GB, a.getUsedResources().getMemory()); - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, a.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(1*GB, a.getMetrics().getAllocatedMB()); @@ -595,12 +595,12 @@ public class TestLeafQueue { RMContainerEventType.KILL, null, true); } - assertEquals(0*GB, a.getUsedResources().getMemory()); - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(0*GB, a.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(0*GB, a.getMetrics().getAllocatedMB()); - assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemory()), + assertEquals((int)(a.getCapacity() * node_0.getTotalResource().getMemorySize()), a.getMetrics().getAvailableMB()); } @@ -665,27 +665,27 @@ public class TestLeafQueue { // 1 container to user_0 a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(3*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(3*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // Allocate one container to app_1. Even if app_0 // submit earlier, it cannot get this container assigned since user_0 // exceeded user-limit already. a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(4*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); // Allocate one container to app_0, before allocating this container, // user-limit = ceil((4 + 1) / 2) = 3G. app_0's used resource (3G) <= // user-limit. 
a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(7*GB, a.getUsedResources().getMemory()); - assertEquals(6*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(7*GB, a.getUsedResources().getMemorySize()); + assertEquals(6*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); // app_0 doesn't have outstanding resources, there's only one active user. assertEquals("There should only be 1 active user!", @@ -744,7 +744,7 @@ public class TestLeafQueue { "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); //maxqueue 16G, userlimit 13G, - 4G used = 9G - assertEquals(9*GB,app_0.getHeadroom().getMemory()); + assertEquals(9*GB,app_0.getHeadroom().getMemorySize()); //test case 2 final ApplicationAttemptId appAttemptId_2 = @@ -762,13 +762,13 @@ public class TestLeafQueue { qb.computeUserLimitAndSetHeadroom(app_0, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8*GB, qb.getUsedResources().getMemory()); - assertEquals(4*GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8*GB, qb.getUsedResources().getMemorySize()); + assertEquals(4*GB, app_0.getCurrentConsumption().getMemorySize()); //maxqueue 16G, userlimit 13G, - 4G used = 9G BUT //maxqueue 16G - used 8G (4 each app/user) = 8G max headroom (the new logic) - assertEquals(8*GB, app_0.getHeadroom().getMemory()); - assertEquals(4*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(8*GB, app_2.getHeadroom().getMemory()); + assertEquals(8*GB, app_0.getHeadroom().getMemorySize()); + assertEquals(4*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(8*GB, app_2.getHeadroom().getMemorySize()); //test case 3 qb.finishApplication(app_0.getApplicationId(), user_0); @@ -805,10 +805,10 @@ public class TestLeafQueue { new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); qb.computeUserLimitAndSetHeadroom(app_3, clusterResource, "", SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(4*GB, qb.getUsedResources().getMemory()); + assertEquals(4*GB, qb.getUsedResources().getMemorySize()); //maxqueue 16G, userlimit 7G, used (by each user) 2G, headroom 5G (both) - assertEquals(5*GB, app_3.getHeadroom().getMemory()); - assertEquals(5*GB, app_1.getHeadroom().getMemory()); + assertEquals(5*GB, app_3.getHeadroom().getMemorySize()); + assertEquals(5*GB, app_1.getHeadroom().getMemorySize()); //test case 4 final ApplicationAttemptId appAttemptId_4 = TestUtils.getMockApplicationAttemptId(4, 0); @@ -830,13 +830,13 @@ public class TestLeafQueue { //app3 is user1, active from last test case //maxqueue 16G, userlimit 13G, used 2G, would be headroom 10G BUT //10G in use, so max possible headroom is 6G (new logic) - assertEquals(6*GB, app_3.getHeadroom().getMemory()); + assertEquals(6*GB, app_3.getHeadroom().getMemorySize()); //testcase3 still active - 2+2+6=10 - assertEquals(10*GB, qb.getUsedResources().getMemory()); + assertEquals(10*GB, qb.getUsedResources().getMemorySize()); //app4 is user 0 //maxqueue 16G, userlimit 13G, used 8G, headroom 5G //(8G used is 6G from this test case - app4, 2 from last test case, app_1) - assertEquals(5*GB, app_4.getHeadroom().getMemory()); + assertEquals(5*GB, app_4.getHeadroom().getMemorySize()); } @Test @@ -892,16 +892,16 @@ public class TestLeafQueue { a.assignContainers(clusterResource, node_0, new 
ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(1*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); //Now, headroom is the same for all apps for a given user + queue combo //and a change to any app's headroom is reflected for all the user's apps //once those apps are active/have themselves calculated headroom for //allocation at least one time - assertEquals(2*GB, app_0.getHeadroom().getMemory()); - assertEquals(0*GB, app_1.getHeadroom().getMemory());//not yet active - assertEquals(0*GB, app_2.getHeadroom().getMemory());//not yet active + assertEquals(2*GB, app_0.getHeadroom().getMemorySize()); + assertEquals(0*GB, app_1.getHeadroom().getMemorySize());//not yet active + assertEquals(0*GB, app_2.getHeadroom().getMemorySize());//not yet active app_1.updateResourceRequests(Collections.singletonList( TestUtils.createResourceRequest(ResourceRequest.ANY, 1*GB, 2, true, @@ -909,12 +909,12 @@ public class TestLeafQueue { a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_0.getHeadroom().getMemory()); - assertEquals(1*GB, app_1.getHeadroom().getMemory());//now active - assertEquals(0*GB, app_2.getHeadroom().getMemory());//not yet active + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_0.getHeadroom().getMemorySize()); + assertEquals(1*GB, app_1.getHeadroom().getMemorySize());//now active + assertEquals(0*GB, app_2.getHeadroom().getMemorySize());//not yet active //Complete container and verify that headroom is updated, for both apps //for the user @@ -925,8 +925,8 @@ public class TestLeafQueue { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); - assertEquals(2*GB, app_0.getHeadroom().getMemory()); - assertEquals(2*GB, app_1.getHeadroom().getMemory()); + assertEquals(2*GB, app_0.getHeadroom().getMemorySize()); + assertEquals(2*GB, app_1.getHeadroom().getMemorySize()); } @Test @@ -998,23 +998,23 @@ public class TestLeafQueue { // 1 container to user_0 a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // TODO, fix headroom in the future patch - assertEquals(1*GB, app_0.getHeadroom().getMemory()); + assertEquals(1*GB, app_0.getHeadroom().getMemorySize()); // User limit = 4G, 2 in use - assertEquals(0*GB, app_1.getHeadroom().getMemory()); + assertEquals(0*GB, app_1.getHeadroom().getMemorySize()); // the 
application is not yet active // Again one to user_0 since he hasn't exceeded user limit yet a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(3*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_0.getHeadroom().getMemory()); // 4G - 3G - assertEquals(1*GB, app_1.getHeadroom().getMemory()); // 4G - 3G + assertEquals(3*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_0.getHeadroom().getMemorySize()); // 4G - 3G + assertEquals(1*GB, app_1.getHeadroom().getMemorySize()); // 4G - 3G // Submit requests for app_1 and set max-cap a.setMaxCapacity(.1f); @@ -1027,12 +1027,12 @@ public class TestLeafQueue { // and no more containers to queue since it's already at max-cap a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(3*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_0.getHeadroom().getMemory()); - assertEquals(0*GB, app_1.getHeadroom().getMemory()); + assertEquals(3*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_0.getHeadroom().getMemorySize()); + assertEquals(0*GB, app_1.getHeadroom().getMemorySize()); // Check headroom for app_2 app_1.updateResourceRequests(Collections.singletonList( // unset @@ -1041,7 +1041,7 @@ public class TestLeafQueue { assertEquals(1, a.getActiveUsersManager().getNumActiveUsers()); a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(0*GB, app_2.getHeadroom().getMemory()); // hit queue max-cap + assertEquals(0*GB, app_2.getHeadroom().getMemorySize()); // hit queue max-cap } @Test @@ -1112,25 +1112,25 @@ public class TestLeafQueue { // Only 1 container a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(1*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, 
app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // Can't allocate 3rd due to user-limit a.setUserLimit(25); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // Submit resource requests for other apps now to 'activate' them @@ -1147,32 +1147,32 @@ public class TestLeafQueue { a.setUserLimitFactor(10); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(3*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(5*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(3*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); // Now allocations should goto app_0 since // user_0 is at user-limit not above it a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(6*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(3*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(6*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(3*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); // Test max-capacity // Now - no more allocs since we are at max-cap a.setMaxCapacity(0.5f); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(6*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(3*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(6*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(3*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); // Revert max-capacity and user-limit-factor // Now, allocations should goto app_3 since it's under user-limit @@ -1180,20 +1180,20 @@ public class TestLeafQueue { a.setUserLimitFactor(1); 
a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(7*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(3*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(7*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(3*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_3.getCurrentConsumption().getMemorySize()); // Now we should assign to app_3 again since user_2 is under user-limit a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8*GB, a.getUsedResources().getMemory()); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(3*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(2*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(8*GB, a.getUsedResources().getMemorySize()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(3*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(2*GB, app_3.getCurrentConsumption().getMemorySize()); // 8. Release each container from app_0 for (RMContainer rmContainer : app_0.getLiveContainers()) { @@ -1203,11 +1203,11 @@ public class TestLeafQueue { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); } - assertEquals(5*GB, a.getUsedResources().getMemory()); - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(3*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(2*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(5*GB, a.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(3*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(2*GB, app_3.getCurrentConsumption().getMemorySize()); // 9. Release each container from app_2 for (RMContainer rmContainer : app_2.getLiveContainers()) { @@ -1217,11 +1217,11 @@ public class TestLeafQueue { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); } - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(2*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(2*GB, app_3.getCurrentConsumption().getMemorySize()); // 10. 
Release each container from app_3 for (RMContainer rmContainer : app_3.getLiveContainers()) { @@ -1231,11 +1231,11 @@ public class TestLeafQueue { ContainerExitStatus.KILLED_BY_RESOURCEMANAGER), RMContainerEventType.KILL, null, true); } - assertEquals(0*GB, a.getUsedResources().getMemory()); - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(0*GB, a.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); } @Test @@ -1289,9 +1289,9 @@ public class TestLeafQueue { // Only 1 container a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(1*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(1*GB, a.getMetrics().getAllocatedMB()); assertEquals(0*GB, a.getMetrics().getAvailableMB()); @@ -1300,20 +1300,20 @@ public class TestLeafQueue { // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(2*GB, a.getMetrics().getAllocatedMB()); // Now, reservation should kick in for app_1 a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(6*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(2*GB, node_0.getAllocatedResource().getMemory()); + assertEquals(6*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(2*GB, node_0.getAllocatedResource().getMemorySize()); assertEquals(4*GB, a.getMetrics().getReservedMB()); assertEquals(2*GB, a.getMetrics().getAllocatedMB()); @@ -1326,11 +1326,11 @@ public class TestLeafQueue { RMContainerEventType.KILL, null, true); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5*GB, 
a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(1*GB, node_0.getAllocatedResource().getMemory()); + assertEquals(5*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(1*GB, node_0.getAllocatedResource().getMemorySize()); assertEquals(4*GB, a.getMetrics().getReservedMB()); assertEquals(1*GB, a.getMetrics().getAllocatedMB()); @@ -1343,11 +1343,11 @@ public class TestLeafQueue { RMContainerEventType.KILL, null, true); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(4*GB, a.getUsedResources().getMemory()); - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(4*GB, node_0.getAllocatedResource().getMemory()); + assertEquals(4*GB, a.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(4*GB, node_0.getAllocatedResource().getMemorySize()); assertEquals(0*GB, a.getMetrics().getReservedMB()); assertEquals(4*GB, a.getMetrics().getAllocatedMB()); } @@ -1417,26 +1417,26 @@ public class TestLeafQueue { // Only 1 container a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(1*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // Also 2nd -> minCapacity = 1024 since (.1 * 8G) < minAlloc, also // you can get one container more than user-limit a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // Now, reservation should kick in for app_1 a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(6*GB, a.getUsedResources().getMemory()); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(2*GB, node_0.getAllocatedResource().getMemory()); + assertEquals(6*GB, a.getUsedResources().getMemorySize()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + 
assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(2*GB, node_0.getAllocatedResource().getMemorySize()); // Now free 1 container from app_0 i.e. 1G, and re-reserve it RMContainer rmContainer = app_0.getLiveContainers().iterator().next(); @@ -1447,31 +1447,31 @@ public class TestLeafQueue { RMContainerEventType.KILL, null, true); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(1*GB, node_0.getAllocatedResource().getMemory()); + assertEquals(5*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(1*GB, node_0.getAllocatedResource().getMemorySize()); assertEquals(1, app_1.getReReservations(priority)); // Re-reserve a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(1*GB, node_0.getAllocatedResource().getMemory()); + assertEquals(5*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(1*GB, node_0.getAllocatedResource().getMemorySize()); assertEquals(2, app_1.getReReservations(priority)); // Try to schedule on node_1 now, should *move* the reservation a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(9*GB, a.getUsedResources().getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(4*GB, node_1.getAllocatedResource().getMemory()); + assertEquals(9*GB, a.getUsedResources().getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(4*GB, node_1.getAllocatedResource().getMemorySize()); // Doesn't change yet... 
only when reservation is cancelled or a different // container is reserved assertEquals(2, app_1.getReReservations(priority)); @@ -1485,11 +1485,11 @@ public class TestLeafQueue { RMContainerEventType.KILL, null, true); a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(4*GB, a.getUsedResources().getMemory()); - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(4*GB, app_1.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentReservation().getMemory()); - assertEquals(0*GB, node_0.getAllocatedResource().getMemory()); + assertEquals(4*GB, a.getUsedResources().getMemorySize()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(4*GB, app_1.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentReservation().getMemorySize()); + assertEquals(0*GB, node_0.getAllocatedResource().getMemorySize()); } private void verifyContainerAllocated(CSAssignment assignment, NodeType nodeType) { @@ -2447,9 +2447,9 @@ public class TestLeafQueue { // app_1 will get containers as it has high priority a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); app_0_requests_0.clear(); app_0_requests_0.add( @@ -2465,12 +2465,12 @@ public class TestLeafQueue { //app_1 will still get assigned first as priority is more. a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemory()); - Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemorySize()); + Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); //and only then will app_2 a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); + Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); } @Test public void testConcurrentAccess() throws Exception { @@ -2594,9 +2594,9 @@ public class TestLeafQueue { // app_0 will get containers as its submitted first. 
a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); + Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + Assert.assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); app_0_requests_0.clear(); app_0_requests_0.add( @@ -2613,12 +2613,12 @@ public class TestLeafQueue { //Since it already has more resources, app_0 will not get //assigned first, but app_1 will a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemory()); + Assert.assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + Assert.assertEquals(2*GB, app_1.getCurrentConsumption().getMemorySize()); //and only then will app_0 a.assignContainers(clusterResource, node_0_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); + Assert.assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); } @@ -2773,10 +2773,10 @@ public class TestLeafQueue { // all users (only user_0) queue 'e' should be able to consume 1GB. // The first container should be assigned to app_0 with no headroom left // even though user_0's apps are still asking for a total of 4GB. - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // Assign 2nd container of 1GB e.assignContainers(clusterResource, node_0, @@ -2786,19 +2786,19 @@ public class TestLeafQueue { // scheduler will assign one container more than user-limit-factor. // This container also went to app_0. Still with no neadroom even though // app_0 and app_1 are asking for a cumulative 3GB. - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // Can't allocate 3rd container due to user-limit. Headroom still 0. 
e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // Increase user-limit-factor from 1GB to 10GB (1% * 10 * 100GB = 10GB). // Pending for both app_0 and app_1 are still 3GB, so user-limit-factor @@ -2806,16 +2806,16 @@ public class TestLeafQueue { // getTotalPendingResourcesConsideringUserLimit() e.setUserLimitFactor(10.0f); assertEquals(3*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // app_0 is now satisified, app_1 is still asking for 2GB. - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(2*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // Get the last 2 containers for app_1, no more pending requests. e.assignContainers(clusterResource, node_0, @@ -2824,10 +2824,10 @@ public class TestLeafQueue { e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(3*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(2*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(3*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(2*GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // Release each container from app_0 for (RMContainer rmContainer : app_0.getLiveContainers()) { @@ -2929,14 +2929,14 @@ public class TestLeafQueue { // With queue capacity set at 1% of 100GB and user-limit-factor set to 1.0, // queue 'e' should be able to consume 1GB per user. 
assertEquals(2*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // None of the apps have assigned resources // user_0's apps: - assertEquals(0*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // user_1's apps: - assertEquals(0*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); // Assign 1st Container of 1GB e.assignContainers(clusterResource, node_0, @@ -2945,13 +2945,13 @@ public class TestLeafQueue { // The first container was assigned to user_0's app_0. Queues total headroom // has 1GB left for user_1. assertEquals(1*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // user_0's apps: - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_1.getCurrentConsumption().getMemorySize()); // user_1's apps: - assertEquals(0*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); // Assign 2nd container of 1GB e.assignContainers(clusterResource, node_0, @@ -2962,13 +2962,13 @@ public class TestLeafQueue { // this container went to user_0's app_1. so, headroom for queue 'e'e is // still 1GB for user_1 assertEquals(1*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // user_0's apps: - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); // user_1's apps: - assertEquals(0*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(0*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); // Assign 3rd container. e.assignContainers(clusterResource, node_0, @@ -2977,13 +2977,13 @@ public class TestLeafQueue { // Container was allocated to user_1's app_2 since user_1, Now, no headroom // is left. 
assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // user_0's apps: - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_1.getCurrentConsumption().getMemorySize()); // user_1's apps: - assertEquals(1*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(1*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); // Assign 4th container. e.assignContainers(clusterResource, node_0, @@ -2992,16 +2992,16 @@ public class TestLeafQueue { // Allocated to user_1's app_2 since scheduler allocates 1 container // above user resource limit. Available headroom still 0. assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // user_0's apps: - int app_0_consumption = app_0.getCurrentConsumption().getMemory(); + long app_0_consumption = app_0.getCurrentConsumption().getMemorySize(); assertEquals(1*GB, app_0_consumption); - int app_1_consumption = app_1.getCurrentConsumption().getMemory(); + long app_1_consumption = app_1.getCurrentConsumption().getMemorySize(); assertEquals(1*GB, app_1_consumption); // user_1's apps: - int app_2_consumption = app_2.getCurrentConsumption().getMemory(); + long app_2_consumption = app_2.getCurrentConsumption().getMemorySize(); assertEquals(2*GB, app_2_consumption); - int app_3_consumption = app_3.getCurrentConsumption().getMemory(); + long app_3_consumption = app_3.getCurrentConsumption().getMemorySize(); assertEquals(0*GB, app_3_consumption); // Attempt to assign 5th container. Will be a no-op. @@ -3011,13 +3011,13 @@ public class TestLeafQueue { // Cannot allocate 5th container because both users are above their allowed // user resource limit. Values should be the same as previously. assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // user_0's apps: - assertEquals(app_0_consumption, app_0.getCurrentConsumption().getMemory()); - assertEquals(app_1_consumption, app_1.getCurrentConsumption().getMemory()); + assertEquals(app_0_consumption, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(app_1_consumption, app_1.getCurrentConsumption().getMemorySize()); // user_1's apps: - assertEquals(app_2_consumption, app_2.getCurrentConsumption().getMemory()); - assertEquals(app_3_consumption, app_3.getCurrentConsumption().getMemory()); + assertEquals(app_2_consumption, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(app_3_consumption, app_3.getCurrentConsumption().getMemorySize()); // Increase user-limit-factor from 1GB to 10GB (1% * 10 * 100GB = 10GB). // Pending for both user_0 and user_1 are still 1GB each, so user-limit- @@ -3029,13 +3029,13 @@ public class TestLeafQueue { SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); // Next container goes to user_0's app_1, since it still wanted 1GB. 
assertEquals(1*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); // user_0's apps: - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(2*GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(2*GB, app_1.getCurrentConsumption().getMemorySize()); // user_1's apps: - assertEquals(2*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(0*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(2*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(0*GB, app_3.getCurrentConsumption().getMemorySize()); e.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), @@ -3043,12 +3043,12 @@ public class TestLeafQueue { // Last container goes to user_1's app_3, since it still wanted 1GB. // user_0's apps: assertEquals(0*GB, e.getTotalPendingResourcesConsideringUserLimit( - clusterResource, RMNodeLabelsManager.NO_LABEL).getMemory()); - assertEquals(1*GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(2*GB, app_1.getCurrentConsumption().getMemory()); + clusterResource, RMNodeLabelsManager.NO_LABEL).getMemorySize()); + assertEquals(1*GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(2*GB, app_1.getCurrentConsumption().getMemorySize()); // user_1's apps: - assertEquals(2*GB, app_2.getCurrentConsumption().getMemory()); - assertEquals(1*GB, app_3.getCurrentConsumption().getMemory()); + assertEquals(2*GB, app_2.getCurrentConsumption().getMemorySize()); + assertEquals(1*GB, app_3.getCurrentConsumption().getMemorySize()); // Release each container from app_0 for (RMContainer rmContainer : app_0.getLiveContainers()) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java index 59666f5ec2d..cff1514b8c8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestNodeLabelContainerAllocation.java @@ -39,7 +39,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.NullRMNodeLabelsManager; import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; @@ -522,22 +521,22 @@ public class TestNodeLabelContainerAllocation { Assert.assertEquals(2, schedulerApp1.getLiveContainers().size()); Assert.assertTrue(schedulerApp1.getReservedContainers().size() > 
0); Assert.assertEquals(9 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed("x").getMemory()); + .getUsed("x").getMemorySize()); Assert.assertEquals(4 * GB, cs.getRootQueue().getQueueResourceUsage() - .getReserved("x").getMemory()); + .getReserved("x").getMemorySize()); Assert.assertEquals(4 * GB, - leafQueue.getQueueResourceUsage().getReserved("x").getMemory()); + leafQueue.getQueueResourceUsage().getReserved("x").getMemorySize()); // Cancel asks of app2 and re-kick RM am1.allocate("*", 4 * GB, 0, new ArrayList()); cs.handle(new NodeUpdateSchedulerEvent(rmNode1)); Assert.assertEquals(5 * GB, cs.getRootQueue().getQueueResourceUsage() - .getUsed("x").getMemory()); + .getUsed("x").getMemorySize()); Assert.assertEquals(0, cs.getRootQueue().getQueueResourceUsage() - .getReserved("x").getMemory()); + .getReserved("x").getMemorySize()); Assert.assertEquals(0, leafQueue.getQueueResourceUsage().getReserved("x") - .getMemory()); + .getMemorySize()); rm1.close(); } @@ -549,7 +548,7 @@ public class TestNodeLabelContainerAllocation { app.getAppSchedulingInfo().getResourceRequest( Priority.newInstance(priority), "*"); Assert.assertEquals(memory, - rr.getCapability().getMemory() * rr.getNumContainers()); + rr.getCapability().getMemorySize() * rr.getNumContainers()); } private void checkLaunchedContainerNumOnNode(MockRM rm, NodeId nodeId, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java index 23dc8609275..890e998ea97 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestParentQueue.java @@ -174,14 +174,14 @@ public class TestParentQueue { private float computeQueueAbsoluteUsedCapacity(CSQueue queue, int expectedMemory, Resource clusterResource) { return ( - ((float)expectedMemory / (float)clusterResource.getMemory()) + ((float)expectedMemory / (float)clusterResource.getMemorySize()) ); } private float computeQueueUsedCapacity(CSQueue queue, int expectedMemory, Resource clusterResource) { return (expectedMemory / - (clusterResource.getMemory() * queue.getAbsoluteCapacity())); + (clusterResource.getMemorySize() * queue.getAbsoluteCapacity())); } final static float DELTA = 0.0001f; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java index 356ed464678..ee370b483df 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestQueueCapacities.java @@ -117,7 +117,7 @@ public class TestQueueCapacities { } void check(int mem, int cpu, Resource res) { - Assert.assertEquals(mem, res.getMemory()); + Assert.assertEquals(mem, res.getMemorySize()); Assert.assertEquals(cpu, res.getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java index 632b54705c0..e8ac804557b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestReservations.java @@ -266,88 +266,88 @@ public class TestReservations { // Only AM a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5 * GB, a.getUsedResources().getMemory()); - assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, a.getUsedResources().getMemorySize()); + assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(19 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, 
app_0.getCurrentConsumption().getMemory()); + assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(16 * GB, a.getMetrics().getAvailableMB()); - assertEquals(16 * GB, app_0.getHeadroom().getMemory()); + assertEquals(16 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(13 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(13 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(11 * GB, app_0.getHeadroom().getMemory()); + assertEquals(11 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() - .getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + .getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // assign reducer to node 2 a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(18 * GB, a.getUsedResources().getMemory()); - assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(18 * GB, a.getUsedResources().getMemorySize()); + assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(6 * GB, a.getMetrics().getAvailableMB()); - assertEquals(6 * GB, app_0.getHeadroom().getMemory()); + assertEquals(6 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() - .getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); + .getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize()); 
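Aside on the pattern these hunks follow: each assertion of the form assertEquals(N * GB, something.getMemory()) is rewritten to call getMemorySize(), while the expected value stays a plain int expression. That compiles cleanly because the int expected value widens to long, so JUnit resolves to the assertEquals(long, long) overload and no cast is needed on the test side. Below is a minimal, self-contained sketch of that overload-resolution point; FakeResource is a hypothetical stand-in, not the real org.apache.hadoop.yarn.api.records.Resource.

import static org.junit.Assert.assertEquals;

import org.junit.Test;

public class MemorySizeAssertionSketch {
  private static final int GB = 1024;

  // Hypothetical stand-in for the relevant slice of the Resource API:
  // the replacement accessor reports memory as a long.
  static class FakeResource {
    private final long memoryMb;
    FakeResource(long memoryMb) { this.memoryMb = memoryMb; }
    long getMemorySize() { return memoryMb; }
  }

  @Test
  public void intExpectedValueWidensToLong() {
    FakeResource allocated = new FakeResource(5 * GB);
    // 5 * GB is an int expression; it widens to long, so the
    // assertEquals(long, long) overload is selected and the assertion
    // compiles without an explicit cast, just like the hunks above.
    assertEquals(5 * GB, allocated.getMemorySize());
  }
}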
assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); // node_1 heartbeat and unreserves from node_0 in order to allocate // on node_1 a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(18 * GB, a.getUsedResources().getMemory()); - assertEquals(18 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(18 * GB, a.getUsedResources().getMemorySize()); + assertEquals(18 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(18 * GB, a.getMetrics().getAllocatedMB()); assertEquals(6 * GB, a.getMetrics().getAvailableMB()); - assertEquals(6 * GB, app_0.getHeadroom().getMemory()); + assertEquals(6 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(8 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize()); assertEquals(0, app_0.getTotalRequiredResources(priorityReduce)); } @@ -428,27 +428,27 @@ public class TestReservations { // Only AM a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(0 * GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(0 * GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(4 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(2 * GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(4 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(4 * GB, a.getMetrics().getAllocatedMB()); assertEquals(20 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(2 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(2 * GB, node_1.getAllocatedResource().getMemorySize()); + 
assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // Add a few requests to each app app_0.updateResourceRequests(Collections.singletonList(TestUtils @@ -461,29 +461,29 @@ public class TestReservations { // add a reservation for app_0 a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(12 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(2 * GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(12 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(2 * GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(8 * GB, a.getMetrics().getReservedMB()); assertEquals(4 * GB, a.getMetrics().getAllocatedMB()); assertEquals(12 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(2 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(2 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // next assignment is beyond user limit for user_0 but it should assign to // app_1 for user_1 a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(14 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(4 * GB, app_1.getCurrentConsumption().getMemory()); + assertEquals(14 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(4 * GB, app_1.getCurrentConsumption().getMemorySize()); assertEquals(8 * GB, a.getMetrics().getReservedMB()); assertEquals(6 * GB, a.getMetrics().getAllocatedMB()); assertEquals(10 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(4 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(4 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); } @Test @@ -563,89 +563,89 @@ public class TestReservations { // Only AM a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, 
node_2.getAllocatedResource().getMemorySize()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5 * GB, a.getUsedResources().getMemory()); - assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, a.getUsedResources().getMemorySize()); + assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(19 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(16 * GB, a.getMetrics().getAvailableMB()); - assertEquals(16 * GB, app_0.getHeadroom().getMemory()); + assertEquals(16 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(13 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(13 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(11 * GB, app_0.getHeadroom().getMemory()); + assertEquals(11 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() - .getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + .getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); 
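A second recurring pattern, visible both in the earlier TestLeafQueue hunk (the app_*_consumption locals) and further down in TestFSAppAttempt (the min and verifyHeadroom helpers): wherever a test captures getMemorySize() in a variable or routes it through a helper, the declaration is widened to long rather than cast back to int. The sketch below illustrates that widen-don't-cast style under stated assumptions; the names (minOf, consumedMb) are hypothetical, not the real test code.

// Sketch only: demonstrates widening locals and helper signatures to long
// instead of narrowing a long memory size back to int.
public class WidenDontCastSketch {

  // Helper widened to long so callers can pass memory sizes directly.
  private static long minOf(long a, long b, long c) {
    return Math.min(Math.min(a, b), c);
  }

  public static void main(String[] args) {
    long clusterMb = 12L * 1024;
    long queueMb = 8L * 1024;
    long appLimitMb = 10L * 1024;

    // Capture the result in a long local; casting to int instead would
    // silently truncate once a memory size exceeds Integer.MAX_VALUE.
    long consumedMb = minOf(clusterMb, queueMb, appLimitMb);
    System.out.println("headroom-style min = " + consumedMb + " MB");
  }
}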
assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // assign reducer to node 2 a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(18 * GB, a.getUsedResources().getMemory()); - assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(18 * GB, a.getUsedResources().getMemorySize()); + assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(6 * GB, a.getMetrics().getAvailableMB()); - assertEquals(6 * GB, app_0.getHeadroom().getMemory()); + assertEquals(6 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() - .getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); + .getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize()); assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); // node_1 heartbeat and won't unreserve from node_0, potentially stuck // if AM doesn't handle a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(18 * GB, a.getUsedResources().getMemory()); - assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(18 * GB, a.getUsedResources().getMemorySize()); + assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(6 * GB, a.getMetrics().getAvailableMB()); - assertEquals(6 * GB, app_0.getHeadroom().getMemory()); + assertEquals(6 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() - .getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); + .getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize()); assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); } @@ -723,66 +723,66 @@ public class TestReservations { // Only AM a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(14 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + 
assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5 * GB, a.getUsedResources().getMemory()); - assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, a.getUsedResources().getMemorySize()); + assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(8 * GB, a.getMetrics().getAvailableMB()); - assertEquals(8 * GB, app_0.getHeadroom().getMemory()); + assertEquals(8 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // try to assign reducer (5G on node 0 and should reserve) a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(13 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(13 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); - assertEquals(3 * GB, app_0.getHeadroom().getMemory()); + assertEquals(3 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(5 * GB, node_0.getReservedContainer().getReservedResource() - .getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + .getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); assertEquals(2, app_0.getTotalRequiredResources(priorityReduce)); // could allocate but told need to unreserve first a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(13 * GB, a.getUsedResources().getMemory()); - assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(13 * GB, 
a.getUsedResources().getMemorySize()); + assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); - assertEquals(3 * GB, app_0.getHeadroom().getMemory()); + assertEquals(3 * GB, app_0.getHeadroom().getMemorySize()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(8 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(8 * GB, node_1.getAllocatedResource().getMemorySize()); assertEquals(1, app_0.getTotalRequiredResources(priorityReduce)); } @@ -986,50 +986,50 @@ public class TestReservations { // Only AM a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(14 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5 * GB, a.getUsedResources().getMemory()); - assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, a.getUsedResources().getMemorySize()); + assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(8 * GB, a.getMetrics().getAvailableMB()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); // now add in reservations and make sure it continues if config set // allocate to 
queue so that the potential new capacity is greater then // absoluteMaxCapacity a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(13 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(13 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); - assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(3 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); ResourceLimits limits = new ResourceLimits(Resources.createResource(13 * GB)); @@ -1042,7 +1042,7 @@ public class TestReservations { // 16GB total, 13GB consumed (8 allocated, 5 reserved). asking for 5GB so we would have to // unreserve 2GB to get the total 5GB needed. // also note vcore checks not enabled - assertEquals(0, limits.getHeadroom().getMemory()); + assertEquals(0, limits.getHeadroom().getMemorySize()); refreshQueuesTurnOffReservationsContLook(a, csConf); @@ -1160,52 +1160,52 @@ public class TestReservations { // Only AM a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(14 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5 * GB, a.getUsedResources().getMemory()); - assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, a.getUsedResources().getMemorySize()); + assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + 
assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(8 * GB, a.getMetrics().getAvailableMB()); assertEquals(null, node_0.getReservedContainer()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); // now add in reservations and make sure it continues if config set // allocate to queue so that the potential new capacity is greater then // absoluteMaxCapacity a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(13 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); - assertEquals(5 * GB, app_0.getCurrentReservation().getMemory()); + assertEquals(13 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); + assertEquals(5 * GB, app_0.getCurrentReservation().getMemorySize()); assertEquals(5 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); - assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); + assertEquals(3 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); // not over the limit Resource limit = Resources.createResource(14 * GB, 0); @@ -1312,39 +1312,39 @@ public class TestReservations { // Only AM a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(2 * GB, a.getUsedResources().getMemory()); - assertEquals(2 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(2 * GB, a.getUsedResources().getMemorySize()); + assertEquals(2 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(2 * GB, a.getMetrics().getAllocatedMB()); assertEquals(22 * GB, a.getMetrics().getAvailableMB()); - assertEquals(2 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // Only 1 map - simulating reduce a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(5 * GB, a.getUsedResources().getMemory()); - assertEquals(5 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(5 * GB, a.getUsedResources().getMemorySize()); + assertEquals(5 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(5 * GB, a.getMetrics().getAllocatedMB()); assertEquals(19 * GB, 
a.getMetrics().getAvailableMB()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // Only 1 map to other node - simulating reduce a.assignContainers(clusterResource, node_1, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(16 * GB, a.getMetrics().getAvailableMB()); - assertEquals(16 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(16 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // try to assign reducer (5G on node 0), but tell it's resource limits < // used (8G) + required (5G). It will not reserved since it has to unreserve @@ -1352,72 +1352,72 @@ public class TestReservations { // unreserve resource to reserve container. a.assignContainers(clusterResource, node_0, new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(16 * GB, a.getMetrics().getAvailableMB()); // app_0's headroom = limit (10G) - used (8G) = 2G - assertEquals(2 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // try to assign reducer (5G on node 0), but tell it's resource limits < // used (8G) + required (5G). It will not reserved since it has to unreserve // some resource. Unfortunately, there's nothing to unreserve. 
a.assignContainers(clusterResource, node_2, new ResourceLimits(Resources.createResource(10 * GB)), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(8 * GB, a.getUsedResources().getMemory()); - assertEquals(8 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(8 * GB, a.getUsedResources().getMemorySize()); + assertEquals(8 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(8 * GB, a.getMetrics().getAllocatedMB()); assertEquals(16 * GB, a.getMetrics().getAvailableMB()); // app_0's headroom = limit (10G) - used (8G) = 2G - assertEquals(2 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(0 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(2 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(0 * GB, node_2.getAllocatedResource().getMemorySize()); // let it assign 5G to node_2 a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(13 * GB, a.getUsedResources().getMemory()); - assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(13 * GB, a.getUsedResources().getMemorySize()); + assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(0 * GB, a.getMetrics().getReservedMB()); assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(11 * GB, a.getMetrics().getAvailableMB()); - assertEquals(11 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(11 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize()); // reserve 8G node_0 a.assignContainers(clusterResource, node_0, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(21 * GB, a.getUsedResources().getMemory()); - assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(21 * GB, a.getUsedResources().getMemorySize()); + assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(8 * GB, a.getMetrics().getReservedMB()); assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); - assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(3 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize()); // try to assign (8G on node 2). No room to allocate, // continued to try due to having reservation above, // but hits queue limits so can't reserve anymore. 
a.assignContainers(clusterResource, node_2, new ResourceLimits(clusterResource), SchedulingMode.RESPECT_PARTITION_EXCLUSIVITY); - assertEquals(21 * GB, a.getUsedResources().getMemory()); - assertEquals(13 * GB, app_0.getCurrentConsumption().getMemory()); + assertEquals(21 * GB, a.getUsedResources().getMemorySize()); + assertEquals(13 * GB, app_0.getCurrentConsumption().getMemorySize()); assertEquals(8 * GB, a.getMetrics().getReservedMB()); assertEquals(13 * GB, a.getMetrics().getAllocatedMB()); assertEquals(3 * GB, a.getMetrics().getAvailableMB()); - assertEquals(3 * GB, app_0.getHeadroom().getMemory()); - assertEquals(5 * GB, node_0.getAllocatedResource().getMemory()); - assertEquals(3 * GB, node_1.getAllocatedResource().getMemory()); - assertEquals(5 * GB, node_2.getAllocatedResource().getMemory()); + assertEquals(3 * GB, app_0.getHeadroom().getMemorySize()); + assertEquals(5 * GB, node_0.getAllocatedResource().getMemorySize()); + assertEquals(3 * GB, node_1.getAllocatedResource().getMemorySize()); + assertEquals(5 * GB, node_2.getAllocatedResource().getMemorySize()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java index cb62ba04c28..0386aabf9f9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestWorkPreservingRMRestartForNodeLabel.java @@ -112,14 +112,14 @@ public class TestWorkPreservingRMRestartForNodeLabel { FiCaSchedulerApp app = cs.getSchedulerApplications().get(appId).getCurrentAppAttempt(); Assert.assertEquals(expectedMemUsage, app.getAppAttemptResourceUsage() - .getUsed(partition).getMemory()); + .getUsed(partition).getMemorySize()); } private void checkQueueResourceUsage(String partition, String queueName, MockRM rm, int expectedMemUsage) { CapacityScheduler cs = (CapacityScheduler) rm.getResourceScheduler(); CSQueue queue = cs.getQueue(queueName); Assert.assertEquals(expectedMemUsage, queue.getQueueResourceUsage() - .getUsed(partition).getMemory()); + .getUsed(partition).getMemorySize()); } @Test diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java index 0e1d90424e8..ec0e6aa875b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java @@ -275,8 +275,8 @@ public class FairSchedulerTestBase { 
} // available resource - Assert.assertEquals(resource.getMemory(), - app.getCurrentConsumption().getMemory()); + Assert.assertEquals(resource.getMemorySize(), + app.getCurrentConsumption().getMemorySize()); Assert.assertEquals(resource.getVirtualCores(), app.getCurrentConsumption().getVirtualCores()); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java index 9d8dd073d9e..4f3ccb2acd4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestComputeFairShares.java @@ -196,7 +196,7 @@ public class TestComputeFairShares { private void verifyMemoryShares(int... shares) { Assert.assertEquals(scheds.size(), shares.length); for (int i = 0; i < shares.length; i++) { - Assert.assertEquals(shares[i], scheds.get(i).getFairShare().getMemory()); + Assert.assertEquals(shares[i], scheds.get(i).getFairShare().getMemorySize()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java index 2e7b3f85e6e..f7b5f3bdbb5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestContinuousScheduling.java @@ -125,7 +125,7 @@ public class TestContinuousScheduling extends FairSchedulerTestBase { scheduler.handle(nodeEvent2); // available resource - Assert.assertEquals(scheduler.getClusterResource().getMemory(), 16 * 1024); + Assert.assertEquals(scheduler.getClusterResource().getMemorySize(), 16 * 1024); Assert.assertEquals(scheduler.getClusterResource().getVirtualCores(), 16); // send application request diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java index e733b1c2a1e..1901fa99df5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSAppAttempt.java @@ -22,7 +22,6 @@ import 
java.util.ArrayList; import java.util.List; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; @@ -30,7 +29,6 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.isA; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.spy; @@ -238,9 +236,9 @@ public class TestFSAppAttempt extends FairSchedulerTestBase { Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy .getInstance(DominantResourceFairnessPolicy.class)); verifyHeadroom(schedulerApp, - min(queueStarvation.getMemory(), - clusterAvailable.getMemory(), - queueMaxResourcesAvailable.getMemory()), + min(queueStarvation.getMemorySize(), + clusterAvailable.getMemorySize(), + queueMaxResourcesAvailable.getMemorySize()), min(queueStarvation.getVirtualCores(), clusterAvailable.getVirtualCores(), queueMaxResourcesAvailable.getVirtualCores()) @@ -250,9 +248,9 @@ public class TestFSAppAttempt extends FairSchedulerTestBase { Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy .getInstance(FairSharePolicy.class)); verifyHeadroom(schedulerApp, - min(queueStarvation.getMemory(), - clusterAvailable.getMemory(), - queueMaxResourcesAvailable.getMemory()), + min(queueStarvation.getMemorySize(), + clusterAvailable.getMemorySize(), + queueMaxResourcesAvailable.getMemorySize()), Math.min( clusterAvailable.getVirtualCores(), queueMaxResourcesAvailable.getVirtualCores()) @@ -261,9 +259,9 @@ public class TestFSAppAttempt extends FairSchedulerTestBase { Mockito.when(mockQueue.getPolicy()).thenReturn(SchedulingPolicy .getInstance(FifoPolicy.class)); verifyHeadroom(schedulerApp, - min(queueStarvation.getMemory(), - clusterAvailable.getMemory(), - queueMaxResourcesAvailable.getMemory()), + min(queueStarvation.getMemorySize(), + clusterAvailable.getMemorySize(), + queueMaxResourcesAvailable.getMemorySize()), Math.min( clusterAvailable.getVirtualCores(), queueMaxResourcesAvailable.getVirtualCores()) @@ -288,9 +286,9 @@ public class TestFSAppAttempt extends FairSchedulerTestBase { Resource clusterResource = scheduler.getClusterResource(); Resource clusterUsage = scheduler.getRootQueueMetrics() .getAllocatedResources(); - assertEquals(12 * 1024, clusterResource.getMemory()); + assertEquals(12 * 1024, clusterResource.getMemorySize()); assertEquals(12, clusterResource.getVirtualCores()); - assertEquals(0, clusterUsage.getMemory()); + assertEquals(0, clusterUsage.getMemorySize()); assertEquals(0, clusterUsage.getVirtualCores()); ApplicationAttemptId id11 = createAppAttemptId(1, 1); createMockRMApp(id11); @@ -302,7 +300,7 @@ public class TestFSAppAttempt extends FairSchedulerTestBase { FSAppAttempt app = scheduler.getSchedulerApp(id11); assertNotNull(app); Resource queueUsage = app.getQueue().getResourceUsage(); - assertEquals(0, queueUsage.getMemory()); + assertEquals(0, queueUsage.getMemorySize()); assertEquals(0, queueUsage.getVirtualCores()); SchedulerNode n1 = scheduler.getSchedulerNode(node1.getNodeID()); SchedulerNode n2 = scheduler.getSchedulerNode(node2.getNodeID()); @@ -337,14 +335,14 @@ public class TestFSAppAttempt extends FairSchedulerTestBase { assertEquals(clusterResource, spyApp.getHeadroom()); } - private static int min(int 
value1, int value2, int value3) { + private static long min(long value1, long value2, long value3) { return Math.min(Math.min(value1, value2), value3); } protected void verifyHeadroom(FSAppAttempt schedulerApp, - int expectedMemory, int expectedCPU) { + long expectedMemory, long expectedCPU) { Resource headroom = schedulerApp.getHeadroom(); - assertEquals(expectedMemory, headroom.getMemory()); + assertEquals(expectedMemory, headroom.getMemorySize()); assertEquals(expectedCPU, headroom.getVirtualCores()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java index 867fd22a710..ad4e2e436cf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFSLeafQueue.java @@ -43,7 +43,6 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.MockRM; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.metrics.SystemMetricsEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.NodeAddedSchedulerEvent; @@ -211,7 +210,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase { QueueManager queueMgr = scheduler.getQueueManager(); FSLeafQueue queueA = queueMgr.getLeafQueue("queueA", false); - assertEquals(4 * 1024, queueA.getResourceUsage().getMemory()); + assertEquals(4 * 1024, queueA.getResourceUsage().getMemorySize()); // Both queue B1 and queue B2 want 3 * 1024 createSchedulingRequest(1 * 1024, "queueB.queueB1", "user1", 3); @@ -223,8 +222,8 @@ public class TestFSLeafQueue extends FairSchedulerTestBase { FSLeafQueue queueB1 = queueMgr.getLeafQueue("queueB.queueB1", false); FSLeafQueue queueB2 = queueMgr.getLeafQueue("queueB.queueB2", false); - assertEquals(2 * 1024, queueB1.getResourceUsage().getMemory()); - assertEquals(2 * 1024, queueB2.getResourceUsage().getMemory()); + assertEquals(2 * 1024, queueB1.getResourceUsage().getMemorySize()); + assertEquals(2 * 1024, queueB2.getResourceUsage().getMemorySize()); // For queue B1, the fairSharePreemptionThreshold is 0.4, and the fair share // threshold is 1.6 * 1024 @@ -237,8 +236,8 @@ public class TestFSLeafQueue extends FairSchedulerTestBase { // Node checks in again scheduler.handle(nodeEvent2); scheduler.handle(nodeEvent2); - assertEquals(3 * 1024, queueB1.getResourceUsage().getMemory()); - assertEquals(3 * 1024, queueB2.getResourceUsage().getMemory()); + assertEquals(3 * 1024, queueB1.getResourceUsage().getMemorySize()); + assertEquals(3 * 1024, queueB2.getResourceUsage().getMemorySize()); // Both queue B1 and queue B2 usages go to 3 * 1024 assertFalse(queueB1.isStarvedForFairShare()); @@ -283,7 +282,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase { 
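Note on the pattern running through these hunks: the tests move from the int-valued Resource.getMemory() to the long-valued Resource.getMemorySize(), and widen local helpers (such as the min(...) and verifyHeadroom(...) signatures above) so the assertEquals(long, long) overload is used. A minimal, self-contained sketch of the motivation, in plain Java with no Hadoop dependency (node count and size below are illustrative only): aggregated memory expressed in MB can overflow a 32-bit int on large clusters.

// Illustrative only: why a 64-bit accessor is needed for aggregated memory in MB.
public class MemorySizeOverflowSketch {
  public static void main(String[] args) {
    long nodes = 10_000L;                  // hypothetical cluster size
    long mbPerNode = 256L * 1024L;         // 256 GB per node, expressed in MB
    long totalMb = nodes * mbPerNode;      // 2,621,440,000 MB
    int truncated = (int) totalMb;         // wraps past Integer.MAX_VALUE (2,147,483,647)
    System.out.println("as long: " + totalMb);    // 2621440000
    System.out.println("as int:  " + truncated);  // negative, wrapped value
  }
}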
QueueManager queueMgr = scheduler.getQueueManager(); FSLeafQueue queueA = queueMgr.getLeafQueue("queueA", false); - assertEquals(7 * 1024, queueA.getResourceUsage().getMemory()); + assertEquals(7 * 1024, queueA.getResourceUsage().getMemorySize()); assertEquals(1, queueA.getResourceUsage().getVirtualCores()); // Queue B has 3 reqs : @@ -298,7 +297,7 @@ public class TestFSLeafQueue extends FairSchedulerTestBase { } FSLeafQueue queueB = queueMgr.getLeafQueue("queueB", false); - assertEquals(3 * 1024, queueB.getResourceUsage().getMemory()); + assertEquals(3 * 1024, queueB.getResourceUsage().getMemorySize()); assertEquals(6, queueB.getResourceUsage().getVirtualCores()); scheduler.update(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index 87c018f723e..ff08349c01f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -220,10 +220,10 @@ public class TestFairScheduler extends FairSchedulerTestBase { Assert.assertEquals(10, scheduler.continuousSchedulingSleepMs); Assert.assertEquals(5000, scheduler.nodeLocalityDelayMs); Assert.assertEquals(5000, scheduler.rackLocalityDelayMs); - Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemory()); - Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemory()); + Assert.assertEquals(1024, scheduler.getMaximumResourceCapability().getMemorySize()); + Assert.assertEquals(512, scheduler.getMinimumResourceCapability().getMemorySize()); Assert.assertEquals(128, - scheduler.getIncrementResourceCapability().getMemory()); + scheduler.getIncrementResourceCapability().getMemorySize()); } @Test @@ -238,9 +238,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); scheduler.init(conf); scheduler.reinitialize(conf, null); - Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemory()); + Assert.assertEquals(256, scheduler.getMinimumResourceCapability().getMemorySize()); Assert.assertEquals(1, scheduler.getMinimumResourceCapability().getVirtualCores()); - Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory()); + Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemorySize()); Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores()); } @@ -256,9 +256,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_VCORES, 2); scheduler.init(conf); scheduler.reinitialize(conf, null); - Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemory()); + Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getMemorySize()); Assert.assertEquals(0, scheduler.getMinimumResourceCapability().getVirtualCores()); - Assert.assertEquals(512, scheduler.getIncrementResourceCapability().getMemory()); + Assert.assertEquals(512, 
scheduler.getIncrementResourceCapability().getMemorySize()); Assert.assertEquals(2, scheduler.getIncrementResourceCapability().getVirtualCores()); } @@ -274,19 +274,19 @@ public class TestFairScheduler extends FairSchedulerTestBase { .newNodeInfo(1, Resources.createResource(1024), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - assertEquals(1024, scheduler.getClusterResource().getMemory()); + assertEquals(1024, scheduler.getClusterResource().getMemorySize()); // Add another node RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(512), 2, "127.0.0.2"); NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); scheduler.handle(nodeEvent2); - assertEquals(1536, scheduler.getClusterResource().getMemory()); + assertEquals(1536, scheduler.getClusterResource().getMemorySize()); // Remove the first node NodeRemovedSchedulerEvent nodeEvent3 = new NodeRemovedSchedulerEvent(node1); scheduler.handle(nodeEvent3); - assertEquals(512, scheduler.getClusterResource().getMemory()); + assertEquals(512, scheduler.getClusterResource().getMemorySize()); } @Test @@ -317,9 +317,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Divided three ways - between the two queues and the default queue for (FSLeafQueue p : queues) { - assertEquals(3414, p.getFairShare().getMemory()); + assertEquals(3414, p.getFairShare().getMemorySize()); assertEquals(3414, p.getMetrics().getFairShareMB()); - assertEquals(3414, p.getSteadyFairShare().getMemory()); + assertEquals(3414, p.getSteadyFairShare().getMemorySize()); assertEquals(3414, p.getMetrics().getSteadyFairShareMB()); } } @@ -365,11 +365,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue( "queueA", false); // queueA's weight is 0.25, so its fair share should be 2 * 1024. - assertEquals(2 * 1024, queue.getFairShare().getMemory()); + assertEquals(2 * 1024, queue.getFairShare().getMemorySize()); // queueB's weight is 0.75, so its fair share should be 6 * 1024. queue = scheduler.getQueueManager().getLeafQueue( "queueB", false); - assertEquals(6 * 1024, queue.getFairShare().getMemory()); + assertEquals(6 * 1024, queue.getFairShare().getMemorySize()); } @Test @@ -409,11 +409,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { FSLeafQueue queue = scheduler.getQueueManager().getLeafQueue( "queueA", false); // queueA's weight is 0.0, so its fair share should be 0. - assertEquals(0, queue.getFairShare().getMemory()); + assertEquals(0, queue.getFairShare().getMemorySize()); // queueB's weight is 0.0, so its fair share should be 0. queue = scheduler.getQueueManager().getLeafQueue( "queueB", false); - assertEquals(0, queue.getFairShare().getMemory()); + assertEquals(0, queue.getFairShare().getMemorySize()); } @Test @@ -457,12 +457,12 @@ public class TestFairScheduler extends FairSchedulerTestBase { "queueA", false); // queueA's weight is 0.0 and minResources is 1, // so its fair share should be 1 (minShare). - assertEquals(1, queue.getFairShare().getMemory()); + assertEquals(1, queue.getFairShare().getMemorySize()); // queueB's weight is 0.0 and minResources is 1, // so its fair share should be 1 (minShare). 
queue = scheduler.getQueueManager().getLeafQueue( "queueB", false); - assertEquals(1, queue.getFairShare().getMemory()); + assertEquals(1, queue.getFairShare().getMemorySize()); } @Test @@ -507,12 +507,12 @@ public class TestFairScheduler extends FairSchedulerTestBase { "queueA", false); // queueA's weight is 0.5 and minResources is 1024, // so its fair share should be 4096. - assertEquals(4096, queue.getFairShare().getMemory()); + assertEquals(4096, queue.getFairShare().getMemorySize()); // queueB's weight is 0.5 and minResources is 1024, // so its fair share should be 4096. queue = scheduler.getQueueManager().getLeafQueue( "queueB", false); - assertEquals(4096, queue.getFairShare().getMemory()); + assertEquals(4096, queue.getFairShare().getMemorySize()); } @Test @@ -606,17 +606,17 @@ public class TestFairScheduler extends FairSchedulerTestBase { FSLeafQueue queue1 = queueManager.getLeafQueue("default", true); FSLeafQueue queue2 = queueManager.getLeafQueue("parent.queue2", true); FSLeafQueue queue3 = queueManager.getLeafQueue("parent.queue3", true); - assertEquals(capacity / 2, queue1.getFairShare().getMemory()); + assertEquals(capacity / 2, queue1.getFairShare().getMemorySize()); assertEquals(capacity / 2, queue1.getMetrics().getFairShareMB()); - assertEquals(capacity / 2, queue1.getSteadyFairShare().getMemory()); + assertEquals(capacity / 2, queue1.getSteadyFairShare().getMemorySize()); assertEquals(capacity / 2, queue1.getMetrics().getSteadyFairShareMB()); - assertEquals(capacity / 4, queue2.getFairShare().getMemory()); + assertEquals(capacity / 4, queue2.getFairShare().getMemorySize()); assertEquals(capacity / 4, queue2.getMetrics().getFairShareMB()); - assertEquals(capacity / 4, queue2.getSteadyFairShare().getMemory()); + assertEquals(capacity / 4, queue2.getSteadyFairShare().getMemorySize()); assertEquals(capacity / 4, queue2.getMetrics().getSteadyFairShareMB()); - assertEquals(capacity / 4, queue3.getFairShare().getMemory()); + assertEquals(capacity / 4, queue3.getFairShare().getMemorySize()); assertEquals(capacity / 4, queue3.getMetrics().getFairShareMB()); - assertEquals(capacity / 4, queue3.getSteadyFairShare().getMemory()); + assertEquals(capacity / 4, queue3.getSteadyFairShare().getMemorySize()); assertEquals(capacity / 4, queue3.getMetrics().getSteadyFairShareMB()); } @@ -717,13 +717,13 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals( FairSchedulerConfiguration.DEFAULT_RM_SCHEDULER_INCREMENT_ALLOCATION_MB, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2); scheduler.handle(updateEvent2); assertEquals(1024, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); assertEquals(2, scheduler.getQueueManager().getQueue("queue1"). getResourceUsage().getVirtualCores()); @@ -760,7 +760,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 1 is allocated app capacity assertEquals(1024, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // Now queue 2 requests likewise ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", "user1", 1); @@ -770,8 +770,8 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 2 is waiting with a reservation assertEquals(0, scheduler.getQueueManager().getQueue("queue2"). 
- getResourceUsage().getMemory()); - assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory()); + getResourceUsage().getMemorySize()); + assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemorySize()); // Now another node checks in with capacity RMNode node2 = @@ -784,13 +784,13 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure this goes to queue 2 assertEquals(1024, scheduler.getQueueManager().getQueue("queue2"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // The old reservation should still be there... - assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory()); + assertEquals(1024, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemorySize()); // ... but it should disappear when we update the first node. scheduler.handle(updateEvent); - assertEquals(0, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemory()); + assertEquals(0, scheduler.getSchedulerApp(attId).getCurrentReservation().getMemorySize()); } @@ -834,7 +834,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Verify capacity allocation assertEquals(6144, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // Create new app with a resource request that can be satisfied by any // node but would be @@ -866,7 +866,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(new NodeUpdateSchedulerEvent(node4)); assertEquals(8192, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); scheduler.handle(new NodeUpdateSchedulerEvent(node1)); scheduler.handle(new NodeUpdateSchedulerEvent(node2)); @@ -927,7 +927,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Verify capacity allocation assertEquals(8192, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // Create new app with a resource request that can be satisfied by any // node but would be @@ -972,7 +972,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(new NodeUpdateSchedulerEvent(node4)); assertEquals(10240, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); scheduler.handle(new NodeUpdateSchedulerEvent(node1)); scheduler.handle(new NodeUpdateSchedulerEvent(node2)); @@ -1016,7 +1016,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Verify capacity allocation assertEquals(8192, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // Verify number of reservations have decremented assertEquals(0, @@ -1060,7 +1060,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 1 is allocated app capacity assertEquals(2048, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // Now queue 2 requests likewise createSchedulingRequest(1024, "queue2", "user2", 1); @@ -1069,7 +1069,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 2 is allocated app capacity assertEquals(1024, scheduler.getQueueManager().getQueue("queue2"). 
- getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 1); scheduler.update(); @@ -1078,7 +1078,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Ensure the reservation does not get created as allocated memory of // queue1 exceeds max assertEquals(0, scheduler.getSchedulerApp(attId1). - getCurrentReservation().getMemory()); + getCurrentReservation().getMemorySize()); } @Test (timeout = 500000) @@ -1117,7 +1117,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 1 is allocated app capacity assertEquals(2048, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // Now queue 2 requests likewise createSchedulingRequest(1024, "queue2", "user2", 1); @@ -1126,7 +1126,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 2 is allocated app capacity assertEquals(1024, scheduler.getQueueManager().getQueue("queue2"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", "user1", 1); scheduler.update(); @@ -1134,7 +1134,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 1 is waiting with a reservation assertEquals(1024, scheduler.getSchedulerApp(attId1) - .getCurrentReservation().getMemory()); + .getCurrentReservation().getMemorySize()); // Exercise checks that reservation fits scheduler.handle(updateEvent); @@ -1142,7 +1142,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Ensure the reservation still exists as allocated memory of queue1 doesn't // exceed max assertEquals(1024, scheduler.getSchedulerApp(attId1). - getCurrentReservation().getMemory()); + getCurrentReservation().getMemorySize()); // Now reduce max Resources of queue1 down to 2048 out = new PrintWriter(new FileWriter(ALLOC_FILE)); @@ -1166,12 +1166,12 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure allocated memory of queue1 doesn't exceed its maximum assertEquals(2048, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); //the reservation of queue1 should be reclaim assertEquals(0, scheduler.getSchedulerApp(attId1). - getCurrentReservation().getMemory()); + getCurrentReservation().getMemorySize()); assertEquals(1024, scheduler.getQueueManager().getQueue("queue2"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); } @Test @@ -1211,7 +1211,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 1 is allocated app capacity assertEquals(4096, scheduler.getQueueManager().getQueue("queue1"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); // Now queue 2 requests below threshold ApplicationAttemptId attId = createSchedulingRequest(1024, "queue2", "user1", 1); @@ -1220,7 +1220,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 2 has no reservation assertEquals(0, scheduler.getQueueManager().getQueue("queue2"). 
- getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); assertEquals(0, scheduler.getSchedulerApp(attId).getReservedContainers().size()); @@ -1231,7 +1231,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Make sure queue 2 is waiting with a reservation assertEquals(0, scheduler.getQueueManager().getQueue("queue2"). - getResourceUsage().getMemory()); + getResourceUsage().getMemorySize()); assertEquals(3, scheduler.getSchedulerApp(attId).getCurrentReservation() .getVirtualCores()); @@ -1444,10 +1444,10 @@ public class TestFairScheduler extends FairSchedulerTestBase { for (FSLeafQueue p : queues) { if (p.getName().equals("root.queueA")) { - assertEquals(1024, p.getFairShare().getMemory()); + assertEquals(1024, p.getFairShare().getMemorySize()); } else if (p.getName().equals("root.queueB")) { - assertEquals(2048, p.getFairShare().getMemory()); + assertEquals(2048, p.getFairShare().getMemorySize()); } } } @@ -1536,9 +1536,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { if (leaf.getName().equals("root.parentq.user1") || leaf.getName().equals("root.parentq.user2")) { // assert that the fair share is 1/4th node1's capacity - assertEquals(capacity / 4, leaf.getFairShare().getMemory()); + assertEquals(capacity / 4, leaf.getFairShare().getMemorySize()); // assert that the steady fair share is 1/4th node1's capacity - assertEquals(capacity / 4, leaf.getSteadyFairShare().getMemory()); + assertEquals(capacity / 4, leaf.getSteadyFairShare().getMemorySize()); // assert weights are equal for both the user queues assertEquals(1.0, leaf.getWeights().getWeight(ResourceType.MEMORY), 0); } @@ -1572,9 +1572,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { // The steady fair share for all queues should be 0 QueueManager queueManager = scheduler.getQueueManager(); assertEquals(0, queueManager.getLeafQueue("child1", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); assertEquals(0, queueManager.getLeafQueue("child2", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); // Add one node RMNode node1 = @@ -1582,13 +1582,13 @@ public class TestFairScheduler extends FairSchedulerTestBase { .newNodeInfo(1, Resources.createResource(6144), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - assertEquals(6144, scheduler.getClusterResource().getMemory()); + assertEquals(6144, scheduler.getClusterResource().getMemorySize()); // The steady fair shares for all queues should be updated assertEquals(2048, queueManager.getLeafQueue("child1", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); assertEquals(2048, queueManager.getLeafQueue("child2", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); // Reload the allocation configuration file out = new PrintWriter(new FileWriter(ALLOC_FILE)); @@ -1613,20 +1613,20 @@ public class TestFairScheduler extends FairSchedulerTestBase { // The steady fair shares for all queues should be updated assertEquals(1024, queueManager.getLeafQueue("child1", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); assertEquals(2048, queueManager.getLeafQueue("child2", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); assertEquals(2048, queueManager.getLeafQueue("child3", false) - .getSteadyFairShare().getMemory()); + 
.getSteadyFairShare().getMemorySize()); // Remove the node, steady fair shares should back to 0 NodeRemovedSchedulerEvent nodeEvent2 = new NodeRemovedSchedulerEvent(node1); scheduler.handle(nodeEvent2); - assertEquals(0, scheduler.getClusterResource().getMemory()); + assertEquals(0, scheduler.getClusterResource().getMemorySize()); assertEquals(0, queueManager.getLeafQueue("child1", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); assertEquals(0, queueManager.getLeafQueue("child2", false) - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); } @Test @@ -1644,19 +1644,19 @@ public class TestFairScheduler extends FairSchedulerTestBase { .newNodeInfo(1, Resources.createResource(6144), 1, "127.0.0.1"); NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); scheduler.handle(nodeEvent1); - assertEquals(6144, scheduler.getClusterResource().getMemory()); + assertEquals(6144, scheduler.getClusterResource().getMemorySize()); assertEquals(6144, scheduler.getQueueManager().getRootQueue() - .getSteadyFairShare().getMemory()); + .getSteadyFairShare().getMemorySize()); assertEquals(6144, scheduler.getQueueManager() - .getLeafQueue("default", false).getSteadyFairShare().getMemory()); + .getLeafQueue("default", false).getSteadyFairShare().getMemorySize()); // Submit one application ApplicationAttemptId appAttemptId1 = createAppAttemptId(1, 1); createApplicationWithAMResource(appAttemptId1, "default", "user1", null); assertEquals(3072, scheduler.getQueueManager() - .getLeafQueue("default", false).getSteadyFairShare().getMemory()); + .getLeafQueue("default", false).getSteadyFairShare().getMemorySize()); assertEquals(3072, scheduler.getQueueManager() - .getLeafQueue("user1", false).getSteadyFairShare().getMemory()); + .getLeafQueue("user1", false).getSteadyFairShare().getMemorySize()); } /** @@ -1715,10 +1715,10 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); assertEquals(2 * minReqSize, scheduler.getQueueManager().getQueue("root.queue1") - .getDemand().getMemory()); + .getDemand().getMemorySize()); assertEquals(2 * minReqSize + 2 * minReqSize, scheduler .getQueueManager().getQueue("root.queue2").getDemand() - .getMemory()); + .getMemorySize()); } @Test @@ -1828,9 +1828,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { // One container should get reservation and the other should get nothing assertEquals(1024, - scheduler.getSchedulerApp(attId1).getCurrentReservation().getMemory()); + scheduler.getSchedulerApp(attId1).getCurrentReservation().getMemorySize()); assertEquals(0, - scheduler.getSchedulerApp(attId2).getCurrentReservation().getMemory()); + scheduler.getSchedulerApp(attId2).getCurrentReservation().getMemorySize()); } @Test (timeout = 5000) @@ -3149,7 +3149,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true); assertEquals("Queue queue1's fair share should be 0", 0, queue1 - .getFairShare().getMemory()); + .getFairShare().getMemorySize()); createSchedulingRequest(1 * 1024, "root.default", "user1"); scheduler.update(); @@ -3167,11 +3167,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application1's AM requests 1024 MB memory", - 1024, app1.getAMResource().getMemory()); + 1024, app1.getAMResource().getMemorySize()); assertEquals("Application1's AM should be running", 1, 
app1.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 1024 MB memory", - 1024, queue1.getAmResourceUsage().getMemory()); + 1024, queue1.getAmResourceUsage().getMemorySize()); // Exceeds no limits ApplicationAttemptId attId2 = createAppAttemptId(2, 1); @@ -3181,11 +3181,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application2's AM requests 1024 MB memory", - 1024, app2.getAMResource().getMemory()); + 1024, app2.getAMResource().getMemorySize()); assertEquals("Application2's AM should be running", 1, app2.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Exceeds queue limit ApplicationAttemptId attId3 = createAppAttemptId(3, 1); @@ -3195,11 +3195,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application3's AM resource shouldn't be updated", - 0, app3.getAMResource().getMemory()); + 0, app3.getAMResource().getMemorySize()); assertEquals("Application3's AM should not be running", 0, app3.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Still can run non-AM container createSchedulingRequestExistingApplication(1024, 1, attId1); @@ -3208,7 +3208,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals("Application1 should have two running containers", 2, app1.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Remove app1, app3's AM should become running AppAttemptRemovedSchedulerEvent appRemovedEvent1 = @@ -3221,9 +3221,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals("Application3's AM should be running", 1, app3.getLiveContainers().size()); assertEquals("Application3's AM requests 1024 MB memory", - 1024, app3.getAMResource().getMemory()); + 1024, app3.getAMResource().getMemorySize()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Exceeds queue limit ApplicationAttemptId attId4 = createAppAttemptId(4, 1); @@ -3233,11 +3233,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application4's AM resource shouldn't be updated", - 0, app4.getAMResource().getMemory()); + 0, app4.getAMResource().getMemorySize()); assertEquals("Application4's AM should not be running", 0, app4.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Exceeds queue limit ApplicationAttemptId attId5 = createAppAttemptId(5, 1); @@ -3247,11 +3247,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application5's AM resource shouldn't be updated", - 0, app5.getAMResource().getMemory()); + 0, app5.getAMResource().getMemorySize()); assertEquals("Application5's AM should not be 
running", 0, app5.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Remove un-running app doesn't affect others AppAttemptRemovedSchedulerEvent appRemovedEvent4 = @@ -3262,7 +3262,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals("Application5's AM should not be running", 0, app5.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Remove app2 and app3, app5's AM should become running AppAttemptRemovedSchedulerEvent appRemovedEvent2 = @@ -3280,9 +3280,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals("Application5's AM should be running", 1, app5.getLiveContainers().size()); assertEquals("Application5's AM requests 2048 MB memory", - 2048, app5.getAMResource().getMemory()); + 2048, app5.getAMResource().getMemorySize()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // request non-AM container for app5 createSchedulingRequestExistingApplication(1024, 1, attId5); @@ -3297,7 +3297,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals("Application5's AM should have 0 container", 0, app5.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); scheduler.update(); scheduler.handle(updateEvent); // non-AM container should be allocated @@ -3307,7 +3307,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { 1, app5.getLiveContainers().size()); // check non-AM container allocation won't affect queue AmResourceUsage assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Check amResource normalization ApplicationAttemptId attId6 = createAppAttemptId(6, 1); @@ -3319,9 +3319,9 @@ public class TestFairScheduler extends FairSchedulerTestBase { assertEquals("Application6's AM should not be running", 0, app6.getLiveContainers().size()); assertEquals("Application6's AM resource shouldn't be updated", - 0, app6.getAMResource().getMemory()); + 0, app6.getAMResource().getMemorySize()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); // Remove all apps AppAttemptRemovedSchedulerEvent appRemovedEvent5 = @@ -3332,7 +3332,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.handle(appRemovedEvent6); scheduler.update(); assertEquals("Queue1's AM resource usage should be 0", - 0, queue1.getAmResourceUsage().getMemory()); + 0, queue1.getAmResourceUsage().getMemorySize()); } @Test @@ -3375,23 +3375,23 @@ public class TestFairScheduler extends FairSchedulerTestBase { FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1", true); assertEquals("Queue queue1's fair share should be 0", 0, queue1 - .getFairShare().getMemory()); + .getFairShare().getMemorySize()); FSLeafQueue queue2 = scheduler.getQueueManager().getLeafQueue("queue2", true); assertEquals("Queue queue2's fair share 
should be 0", 0, queue2 - .getFairShare().getMemory()); + .getFairShare().getMemorySize()); FSLeafQueue queue3 = scheduler.getQueueManager().getLeafQueue("queue3", true); assertEquals("Queue queue3's fair share should be 0", 0, queue3 - .getFairShare().getMemory()); + .getFairShare().getMemorySize()); FSLeafQueue queue4 = scheduler.getQueueManager().getLeafQueue("queue4", true); assertEquals("Queue queue4's fair share should be 0", 0, queue4 - .getFairShare().getMemory()); + .getFairShare().getMemorySize()); FSLeafQueue queue5 = scheduler.getQueueManager().getLeafQueue("queue5", true); assertEquals("Queue queue5's fair share should be 0", 0, queue5 - .getFairShare().getMemory()); + .getFairShare().getMemorySize()); List queues = Arrays.asList("root.queue3", "root.queue4", "root.queue5"); @@ -3413,11 +3413,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application1's AM requests 1024 MB memory", - 1024, app1.getAMResource().getMemory()); + 1024, app1.getAMResource().getMemorySize()); assertEquals("Application1's AM should be running", 1, app1.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 1024 MB memory", - 1024, queue1.getAmResourceUsage().getMemory()); + 1024, queue1.getAmResourceUsage().getMemorySize()); // Now the fair share is 1639 MB, and the maxAMShare is 0.4f, // so the AM is not accepted. @@ -3428,11 +3428,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application2's AM resource shouldn't be updated", - 0, app2.getAMResource().getMemory()); + 0, app2.getAMResource().getMemorySize()); assertEquals("Application2's AM should not be running", 0, app2.getLiveContainers().size()); assertEquals("Queue2's AM resource usage should be 0 MB memory", - 0, queue2.getAmResourceUsage().getMemory()); + 0, queue2.getAmResourceUsage().getMemorySize()); // Remove the app2 AppAttemptRemovedSchedulerEvent appRemovedEvent2 = @@ -3450,11 +3450,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application3's AM resource shouldn't be updated", - 0, app3.getAMResource().getMemory()); + 0, app3.getAMResource().getMemorySize()); assertEquals("Application3's AM should not be running", 0, app3.getLiveContainers().size()); assertEquals("Queue3's AM resource usage should be 0 MB memory", - 0, queue3.getAmResourceUsage().getMemory()); + 0, queue3.getAmResourceUsage().getMemorySize()); // AM4 can pass the fair share checking and it doesn't takes all // available VCore, but it need 5 VCores which are more than @@ -3466,11 +3466,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { scheduler.update(); scheduler.handle(updateEvent); assertEquals("Application4's AM resource shouldn't be updated", - 0, app4.getAMResource().getMemory()); + 0, app4.getAMResource().getMemorySize()); assertEquals("Application4's AM should not be running", 0, app4.getLiveContainers().size()); assertEquals("Queue3's AM resource usage should be 0 MB memory", - 0, queue3.getAmResourceUsage().getMemory()); + 0, queue3.getAmResourceUsage().getMemorySize()); } /** @@ -3552,11 +3552,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Allocate app1's AM container on node1. 
scheduler.handle(updateE1); assertEquals("Application1's AM requests 1024 MB memory", - 1024, app1.getAMResource().getMemory()); + 1024, app1.getAMResource().getMemorySize()); assertEquals("Application1's AM should be running", 1, app1.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 1024 MB memory", - 1024, queue1.getAmResourceUsage().getMemory()); + 1024, queue1.getAmResourceUsage().getMemorySize()); ApplicationAttemptId attId2 = createAppAttemptId(2, 1); createApplicationWithAMResource(attId2, "queue1", "user1", amResource2); @@ -3566,11 +3566,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Allocate app2's AM container on node2. scheduler.handle(updateE2); assertEquals("Application2's AM requests 1024 MB memory", - 1024, app2.getAMResource().getMemory()); + 1024, app2.getAMResource().getMemorySize()); assertEquals("Application2's AM should be running", 1, app2.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); ApplicationAttemptId attId3 = createAppAttemptId(3, 1); createApplicationWithAMResource(attId3, "queue1", "user1", amResource3); @@ -3583,11 +3583,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Similarly app3 reserves a container on node2. scheduler.handle(updateE2); assertEquals("Application3's AM resource shouldn't be updated", - 0, app3.getAMResource().getMemory()); + 0, app3.getAMResource().getMemorySize()); assertEquals("Application3's AM should not be running", 0, app3.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); ApplicationAttemptId attId4 = createAppAttemptId(4, 1); createApplicationWithAMResource(attId4, "queue1", "user1", amResource4); @@ -3598,21 +3598,21 @@ public class TestFairScheduler extends FairSchedulerTestBase { // app3 already reserved its container on node1. scheduler.handle(updateE1); assertEquals("Application4's AM resource shouldn't be updated", - 0, app4.getAMResource().getMemory()); + 0, app4.getAMResource().getMemorySize()); assertEquals("Application4's AM should not be running", 0, app4.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); scheduler.update(); // Allocate app4's AM container on node3. scheduler.handle(updateE3); assertEquals("Application4's AM requests 5120 MB memory", - 5120, app4.getAMResource().getMemory()); + 5120, app4.getAMResource().getMemorySize()); assertEquals("Application4's AM should be running", 1, app4.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 7168 MB memory", - 7168, queue1.getAmResourceUsage().getMemory()); + 7168, queue1.getAmResourceUsage().getMemorySize()); AppAttemptRemovedSchedulerEvent appRemovedEvent1 = new AppAttemptRemovedSchedulerEvent(attId1, @@ -3620,7 +3620,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Release app1's AM container on node1. 
scheduler.handle(appRemovedEvent1); assertEquals("Queue1's AM resource usage should be 6144 MB memory", - 6144, queue1.getAmResourceUsage().getMemory()); + 6144, queue1.getAmResourceUsage().getMemorySize()); ApplicationAttemptId attId5 = createAppAttemptId(5, 1); createApplicationWithAMResource(attId5, "queue1", "user1", amResource5); @@ -3632,11 +3632,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { // exceeding queue MaxAMShare limit. scheduler.handle(updateE1); assertEquals("Application5's AM requests 1024 MB memory", - 1024, app5.getAMResource().getMemory()); + 1024, app5.getAMResource().getMemorySize()); assertEquals("Application5's AM should be running", 1, app5.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 7168 MB memory", - 7168, queue1.getAmResourceUsage().getMemory()); + 7168, queue1.getAmResourceUsage().getMemorySize()); AppAttemptRemovedSchedulerEvent appRemovedEvent3 = new AppAttemptRemovedSchedulerEvent(attId3, @@ -3644,7 +3644,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Remove app3. scheduler.handle(appRemovedEvent3); assertEquals("Queue1's AM resource usage should be 7168 MB memory", - 7168, queue1.getAmResourceUsage().getMemory()); + 7168, queue1.getAmResourceUsage().getMemorySize()); ApplicationAttemptId attId6 = createAppAttemptId(6, 1); createApplicationWithAMResource(attId6, "queue1", "user1", amResource6); @@ -3655,11 +3655,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { // it exceeds queue MaxAMShare limit. scheduler.handle(updateE1); assertEquals("Application6's AM resource shouldn't be updated", - 0, app6.getAMResource().getMemory()); + 0, app6.getAMResource().getMemorySize()); assertEquals("Application6's AM should not be running", 0, app6.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 7168 MB memory", - 7168, queue1.getAmResourceUsage().getMemory()); + 7168, queue1.getAmResourceUsage().getMemorySize()); ApplicationAttemptId attId7 = createAppAttemptId(7, 1); createApplicationWithAMResource(attId7, "queue1", "user1", amResource7); @@ -3670,11 +3670,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { // app6 didn't reserve a container on node1. scheduler.handle(updateE1); assertEquals("Application7's AM requests 1024 MB memory", - 1024, app7.getAMResource().getMemory()); + 1024, app7.getAMResource().getMemorySize()); assertEquals("Application7's AM should be running", 1, app7.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 8192 MB memory", - 8192, queue1.getAmResourceUsage().getMemory()); + 8192, queue1.getAmResourceUsage().getMemorySize()); AppAttemptRemovedSchedulerEvent appRemovedEvent4 = new AppAttemptRemovedSchedulerEvent(attId4, @@ -3682,7 +3682,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Release app4's AM container on node3. scheduler.handle(appRemovedEvent4); assertEquals("Queue1's AM resource usage should be 3072 MB memory", - 3072, queue1.getAmResourceUsage().getMemory()); + 3072, queue1.getAmResourceUsage().getMemorySize()); AppAttemptRemovedSchedulerEvent appRemovedEvent5 = new AppAttemptRemovedSchedulerEvent(attId5, @@ -3690,7 +3690,7 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Release app5's AM container on node1. 
scheduler.handle(appRemovedEvent5); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); scheduler.update(); // app6 reserves a container on node1 because node1's available resource @@ -3709,21 +3709,21 @@ public class TestFairScheduler extends FairSchedulerTestBase { // app6 already reserved a container on node1. scheduler.handle(updateE1); assertEquals("Application8's AM resource shouldn't be updated", - 0, app8.getAMResource().getMemory()); + 0, app8.getAMResource().getMemorySize()); assertEquals("Application8's AM should not be running", 0, app8.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); scheduler.update(); // app8 can't allocate a container on node2 because // app6 already reserved a container on node2. scheduler.handle(updateE2); assertEquals("Application8's AM resource shouldn't be updated", - 0, app8.getAMResource().getMemory()); + 0, app8.getAMResource().getMemorySize()); assertEquals("Application8's AM should not be running", 0, app8.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 2048 MB memory", - 2048, queue1.getAmResourceUsage().getMemory()); + 2048, queue1.getAmResourceUsage().getMemorySize()); AppAttemptRemovedSchedulerEvent appRemovedEvent2 = new AppAttemptRemovedSchedulerEvent(attId2, @@ -3731,17 +3731,17 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Release app2's AM container on node2. scheduler.handle(appRemovedEvent2); assertEquals("Queue1's AM resource usage should be 1024 MB memory", - 1024, queue1.getAmResourceUsage().getMemory()); + 1024, queue1.getAmResourceUsage().getMemorySize()); scheduler.update(); // app6 turns the reservation into an allocation on node2. scheduler.handle(updateE2); assertEquals("Application6's AM requests 10240 MB memory", - 10240, app6.getAMResource().getMemory()); + 10240, app6.getAMResource().getMemorySize()); assertEquals("Application6's AM should be running", 1, app6.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 11264 MB memory", - 11264, queue1.getAmResourceUsage().getMemory()); + 11264, queue1.getAmResourceUsage().getMemorySize()); scheduler.update(); // app6 unreserve its container on node1 because @@ -3749,11 +3749,11 @@ public class TestFairScheduler extends FairSchedulerTestBase { // Now app8 can allocate its AM container on node1. 
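The accept/reject decisions asserted in this test track the queue's maxAMShare limit. A rough, self-contained sketch of that check, assuming the simplified rule that a new AM is admitted only while current AM usage plus the new AM resource stays within maxAMShare times the queue's fair share (names below are illustrative, not Hadoop APIs; the real check also covers vcores, as the VCore comment earlier in this test suggests):

class MaxAmShareSketch {
  // Simplified admission rule: AM usage after adding the new AM must fit within
  // maxAMShare * fair share (memory only in this sketch).
  static boolean canRunAm(long amUsageMb, long newAmMb, long fairShareMb, double maxAMShare) {
    return amUsageMb + newAmMb <= (long) (fairShareMb * maxAMShare);
  }
  public static void main(String[] args) {
    // With a 1639 MB fair share and maxAMShare 0.4 (see the comments above),
    // the AM limit is roughly 655 MB, so a 1024 MB AM is not admitted.
    System.out.println(canRunAm(0, 1024, 1639, 0.4));  // false
  }
}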
scheduler.handle(updateE1); assertEquals("Application8's AM requests 1024 MB memory", - 1024, app8.getAMResource().getMemory()); + 1024, app8.getAMResource().getMemorySize()); assertEquals("Application8's AM should be running", 1, app8.getLiveContainers().size()); assertEquals("Queue1's AM resource usage should be 12288 MB memory", - 12288, queue1.getAmResourceUsage().getMemory()); + 12288, queue1.getAmResourceUsage().getMemorySize()); } @Test @@ -4450,19 +4450,19 @@ public class TestFairScheduler extends FairSchedulerTestBase { Resource usedResource = resourceManager.getResourceScheduler() .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource(); - Assert.assertEquals(usedResource.getMemory(), 0); + Assert.assertEquals(usedResource.getMemorySize(), 0); Assert.assertEquals(usedResource.getVirtualCores(), 0); // Check total resource of scheduler node is also changed to 0 GB 0 core Resource totalResource = resourceManager.getResourceScheduler() .getSchedulerNode(nm_0.getNodeId()).getTotalResource(); - Assert.assertEquals(totalResource.getMemory(), 0 * GB); + Assert.assertEquals(totalResource.getMemorySize(), 0 * GB); Assert.assertEquals(totalResource.getVirtualCores(), 0); // Check the available resource is 0/0 Resource availableResource = resourceManager.getResourceScheduler() .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource(); - Assert.assertEquals(availableResource.getMemory(), 0); + Assert.assertEquals(availableResource.getMemorySize(), 0); Assert.assertEquals(availableResource.getVirtualCores(), 0); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java index ab8fcbc2b56..a79aacc196e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerFairShare.java @@ -109,10 +109,10 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { for (FSLeafQueue leaf : leafQueues) { if (leaf.getName().startsWith("root.parentA")) { - assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity, + assertEquals(0, (double) leaf.getFairShare().getMemorySize() / nodeCapacity, 0); } else if (leaf.getName().startsWith("root.parentB")) { - assertEquals(0, (double) leaf.getFairShare().getMemory() / nodeCapacity, + assertEquals(0, (double) leaf.getFairShare().getMemorySize() / nodeCapacity, 0); } } @@ -137,12 +137,12 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { 100, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA1", false).getFairShare() - .getMemory() / nodeCapacity * 100, 0.1); + .getMemorySize() / nodeCapacity * 100, 0.1); assertEquals( 0, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA2", false).getFairShare() - .getMemory() / nodeCapacity, 0.1); + .getMemorySize() / nodeCapacity, 0.1); verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(), nodeCapacity); @@ -167,7 +167,7 @@ public class 
TestFairSchedulerFairShare extends FairSchedulerTestBase { 33, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA" + i, false).getFairShare() - .getMemory() + .getMemorySize() / nodeCapacity * 100, .9); } @@ -200,7 +200,7 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { 40, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA" + i, false).getFairShare() - .getMemory() + .getMemorySize() / nodeCapacity * 100, .9); } @@ -210,7 +210,7 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { 10, (double) scheduler.getQueueManager() .getLeafQueue("root.parentB.childB1", false).getFairShare() - .getMemory() + .getMemorySize() / nodeCapacity * 100, .9); verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(), @@ -237,7 +237,7 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { 50, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA" + i, false).getFairShare() - .getMemory() + .getMemorySize() / nodeCapacity * 100, .9); } // Let app under childA1 complete. This should cause the fair share @@ -254,13 +254,13 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { 0, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA1", false).getFairShare() - .getMemory() + .getMemorySize() / nodeCapacity * 100, 0); assertEquals( 100, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA2", false).getFairShare() - .getMemory() + .getMemorySize() / nodeCapacity * 100, 0.1); verifySteadyFairShareMemory(scheduler.getQueueManager().getLeafQueues(), @@ -293,7 +293,7 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { 40, (double) scheduler.getQueueManager() .getLeafQueue("root.parentA.childA" + i, false).getFairShare() - .getMemory() + .getMemorySize() / nodeMem * 100, .9); assertEquals( 40, @@ -308,7 +308,7 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { 10, (double) scheduler.getQueueManager() .getLeafQueue("root.parentB.childB1", false).getFairShare() - .getMemory() + .getMemorySize() / nodeMem * 100, .9); assertEquals( 10, @@ -322,13 +322,13 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { for (FSLeafQueue leaf : leafQueues) { if (leaf.getName().startsWith("root.parentA")) { assertEquals(0.2, - (double) leaf.getSteadyFairShare().getMemory() / nodeMem, 0.001); + (double) leaf.getSteadyFairShare().getMemorySize() / nodeMem, 0.001); assertEquals(0.2, (double) leaf.getSteadyFairShare().getVirtualCores() / nodeVCores, 0.001); } else if (leaf.getName().startsWith("root.parentB")) { assertEquals(0.05, - (double) leaf.getSteadyFairShare().getMemory() / nodeMem, 0.001); + (double) leaf.getSteadyFairShare().getMemorySize() / nodeMem, 0.001); assertEquals(0.1, (double) leaf.getSteadyFairShare().getVirtualCores() / nodeVCores, 0.001); @@ -348,11 +348,11 @@ public class TestFairSchedulerFairShare extends FairSchedulerTestBase { for (FSLeafQueue leaf : leafQueues) { if (leaf.getName().startsWith("root.parentA")) { assertEquals(0.2, - (double) leaf.getSteadyFairShare().getMemory() / nodeCapacity, + (double) leaf.getSteadyFairShare().getMemorySize() / nodeCapacity, 0.001); } else if (leaf.getName().startsWith("root.parentB")) { assertEquals(0.05, - (double) leaf.getSteadyFairShare().getMemory() / nodeCapacity, + (double) leaf.getSteadyFairShare().getMemorySize() / nodeCapacity, 0.001); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java index 2456594cfcd..07a2dcaf60d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerPreemption.java @@ -66,11 +66,11 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase { private ControlledClock clock; private static class StubbedFairScheduler extends FairScheduler { - public int lastPreemptMemory = -1; + public long lastPreemptMemory = -1; @Override protected void preemptResources(Resource toPreempt) { - lastPreemptMemory = toPreempt.getMemory(); + lastPreemptMemory = toPreempt.getMemorySize(); } public void resetLastPreemptResources() { @@ -485,7 +485,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase { scheduler.update(); Resource toPreempt = scheduler.resourceDeficit(scheduler.getQueueManager() .getLeafQueue("queueA.queueA2", false), clock.getTime()); - assertEquals(3277, toPreempt.getMemory()); + assertEquals(3277, toPreempt.getMemorySize()); // verify if the 3 containers required by queueA2 are preempted in the same // round @@ -616,18 +616,18 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase { // share. clock.tickSec(6); assertEquals( - 1024, scheduler.resourceDeficit(schedC, clock.getTime()).getMemory()); + 1024, scheduler.resourceDeficit(schedC, clock.getTime()).getMemorySize()); assertEquals( - 1024, scheduler.resourceDeficit(schedD, clock.getTime()).getMemory()); + 1024, scheduler.resourceDeficit(schedD, clock.getTime()).getMemorySize()); // After fairSharePreemptionTime has passed, they should want to preempt // fair share. scheduler.update(); clock.tickSec(6); assertEquals( - 1536 , scheduler.resourceDeficit(schedC, clock.getTime()).getMemory()); + 1536 , scheduler.resourceDeficit(schedC, clock.getTime()).getMemorySize()); assertEquals( - 1536, scheduler.resourceDeficit(schedD, clock.getTime()).getMemory()); + 1536, scheduler.resourceDeficit(schedD, clock.getTime()).getMemorySize()); stopResourceManager(); } @@ -758,12 +758,12 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase { // share. 
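The StubbedFairScheduler above records the last preemption request by overriding preemptResources and storing toPreempt.getMemorySize() in a field that this patch widens to long. A generic, self-contained sketch of that capture-style test double (plain Java, illustrative names; not the Hadoop classes themselves):

class CaptureSketch {
  interface ResourceLike { long getMemorySize(); }

  static class Scheduler {
    protected void preemptResources(ResourceLike toPreempt) {
      // real scheduling work would happen here
    }
  }

  // Test double: overrides the hook and records the memory argument as a long,
  // mirroring the widened lastPreemptMemory field in the hunk above.
  static class StubbedScheduler extends Scheduler {
    long lastPreemptMemory = -1;
    @Override
    protected void preemptResources(ResourceLike toPreempt) {
      lastPreemptMemory = toPreempt.getMemorySize();
    }
  }

  public static void main(String[] args) {
    StubbedScheduler s = new StubbedScheduler();
    s.preemptResources(() -> 2048L);          // lambda as a minimal ResourceLike
    System.out.println(s.lastPreemptMemory);  // 2048
  }
}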
     clock.tickSec(6);
     Resource res = scheduler.resourceDeficit(schedC, clock.getTime());
-    assertEquals(1024, res.getMemory());
+    assertEquals(1024, res.getMemorySize());
     // Demand = 3
     assertEquals(3, res.getVirtualCores());

     res = scheduler.resourceDeficit(schedD, clock.getTime());
-    assertEquals(1024, res.getMemory());
+    assertEquals(1024, res.getMemorySize());
     // Demand = 6, but min share = 2
     assertEquals(2, res.getVirtualCores());

@@ -772,11 +772,11 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     scheduler.update();
     clock.tickSec(6);
     res = scheduler.resourceDeficit(schedC, clock.getTime());
-    assertEquals(1536, res.getMemory());
+    assertEquals(1536, res.getMemorySize());
     assertEquals(3, res.getVirtualCores());

     res = scheduler.resourceDeficit(schedD, clock.getTime());
-    assertEquals(1536, res.getMemory());
+    assertEquals(1536, res.getMemorySize());
     // Demand = 6, but fair share = 3
     assertEquals(3, res.getVirtualCores());
     stopResourceManager();
@@ -907,61 +907,61 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     scheduler.update();
     clock.tickSec(6);
     assertEquals(
-        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemorySize());
     assertEquals(
-        0, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+        0, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemorySize());
     assertEquals(
-        0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+        0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemorySize());

     // After 10 seconds, queueB2 wants to preempt min share
     scheduler.update();
     clock.tickSec(5);
     assertEquals(
-        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemorySize());
     assertEquals(
-        1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemorySize());
     assertEquals(
-        0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+        0, scheduler.resourceDeficit(queueC, clock.getTime()).getMemorySize());

     // After 15 seconds, queueC wants to preempt min share
     scheduler.update();
     clock.tickSec(5);
     assertEquals(
-        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemorySize());
     assertEquals(
-        1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemorySize());
     assertEquals(
-        1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemorySize());

     // After 20 seconds, queueB2 should want to preempt fair share
     scheduler.update();
     clock.tickSec(5);
     assertEquals(
-        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemorySize());
     assertEquals(
-        1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+        1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemorySize());
     assertEquals(
-        1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemorySize());

     // After 25 seconds, queueB1 should want to preempt fair share
     scheduler.update();
     clock.tickSec(5);
     assertEquals(
-        1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+        1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemorySize());
     assertEquals(
-        1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+        1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemorySize());
     assertEquals(
-        1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+        1024, scheduler.resourceDeficit(queueC, clock.getTime()).getMemorySize());

     // After 30 seconds, queueC should want to preempt fair share
     scheduler.update();
     clock.tickSec(5);
     assertEquals(
-        1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemory());
+        1536, scheduler.resourceDeficit(queueB1, clock.getTime()).getMemorySize());
     assertEquals(
-        1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemory());
+        1536, scheduler.resourceDeficit(queueB2, clock.getTime()).getMemorySize());
     assertEquals(
-        1536, scheduler.resourceDeficit(queueC, clock.getTime()).getMemory());
+        1536, scheduler.resourceDeficit(queueC, clock.getTime()).getMemorySize());

     stopResourceManager();
   }
@@ -1087,7 +1087,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     // queueB to queueD
     clock.tickSec(6);
     assertEquals(2048,
-        scheduler.resourceDeficit(schedD, clock.getTime()).getMemory());
+        scheduler.resourceDeficit(schedD, clock.getTime()).getMemorySize());
     scheduler.preemptResources(Resources.createResource(2 * 1024));

     // now only app2 is selected to be preempted
@@ -1256,7 +1256,7 @@ public class TestFairSchedulerPreemption extends FairSchedulerTestBase {
     // After minSharePreemptionTime has passed, resource deficit is 2G
     clock.tickSec(6);
     assertEquals(2048,
-        scheduler.resourceDeficit(schedA, clock.getTime()).getMemory());
+        scheduler.resourceDeficit(schedA, clock.getTime()).getMemorySize());
     scheduler.preemptResources(Resources.createResource(2 * 1024));

     // now none app is selected to be preempted
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
index 02c60cf7f71..84217c4c7d0 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
@@ -335,9 +335,9 @@ public class TestFifoScheduler {
     // SchedulerNode's total resource and available resource are changed.
     assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID())
-        .getTotalResource().getMemory());
+        .getTotalResource().getMemorySize());
     assertEquals(1024, scheduler.getNodeTracker().getNode(node0.getNodeID()).
-        getUnallocatedResource().getMemory(), 1024);
+        getUnallocatedResource().getMemorySize(), 1024);
     QueueInfo queueInfo = scheduler.getQueueInfo(null, false, false);
     Assert.assertEquals(0.0f, queueInfo.getCurrentCapacity(), 0.0f);
@@ -693,7 +693,7 @@ public class TestFifoScheduler {
     am1.registerAppAttempt();
     SchedulerNodeReport report_nm1 =
         rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
-    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());

     RMApp app2 = rm.submitApp(2048);
     // kick the scheduling, 2GB given to AM, remaining 2 GB on nm2
@@ -703,7 +703,7 @@ public class TestFifoScheduler {
     am2.registerAppAttempt();
     SchedulerNodeReport report_nm2 =
         rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
-    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());
+    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());

     // add request for containers
     am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, GB, 1, 1);
@@ -729,24 +729,24 @@ public class TestFifoScheduler {
     List allocated1 = alloc1Response.getAllocatedContainers();
     Assert.assertEquals(1, allocated1.size());
-    Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemory());
+    Assert.assertEquals(1 * GB, allocated1.get(0).getResource().getMemorySize());
     Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

     List allocated2 = alloc2Response.getAllocatedContainers();
     Assert.assertEquals(1, allocated2.size());
-    Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemory());
+    Assert.assertEquals(3 * GB, allocated2.get(0).getResource().getMemorySize());
     Assert.assertEquals(nm1.getNodeId(), allocated2.get(0).getNodeId());

     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
     report_nm2 = rm.getResourceScheduler().getNodeReport(nm2.getNodeId());
-    Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
-    Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemory());
+    Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
+    Assert.assertEquals(2 * GB, report_nm2.getAvailableResource().getMemorySize());

-    Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemory());
-    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemory());
+    Assert.assertEquals(6 * GB, report_nm1.getUsedResource().getMemorySize());
+    Assert.assertEquals(2 * GB, report_nm2.getUsedResource().getMemorySize());

     Container c1 = allocated1.get(0);
-    Assert.assertEquals(GB, c1.getResource().getMemory());
+    Assert.assertEquals(GB, c1.getResource().getMemorySize());
     ContainerStatus containerStatus = BuilderUtils.newContainerStatus(
         c1.getId(), ContainerState.COMPLETE, "", 0, c1.getResource());
@@ -761,7 +761,7 @@ public class TestFifoScheduler {
     Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses()
         .size());
     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
-    Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(5 * GB, report_nm1.getUsedResource().getMemorySize());

     rm.stop();
   }
@@ -818,7 +818,7 @@ public class TestFifoScheduler {
     int checkAlloc =
         conf.getInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
             YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB);
-    Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(checkAlloc, report_nm1.getUsedResource().getMemorySize());

     rm.stop();
   }
@@ -1072,12 +1072,12 @@ public class TestFifoScheduler {
     Allocation allocation1 = fs.allocate(appAttemptId1, emptyAsk, emptyId,
         null, null, null, null);
     Assert.assertEquals("Allocation headroom", 1 * GB, allocation1
-        .getResourceLimit().getMemory());
+        .getResourceLimit().getMemorySize());

     Allocation allocation2 = fs.allocate(appAttemptId2, emptyAsk, emptyId,
         null, null, null, null);
     Assert.assertEquals("Allocation headroom", 1 * GB, allocation2
-        .getResourceLimit().getMemory());
+        .getResourceLimit().getMemorySize());

     rm.stop();
   }
@@ -1099,8 +1099,8 @@ public class TestFifoScheduler {
     SchedulerNodeReport report_nm1 =
         rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
     // check node report, 2 GB used and 2 GB available
-    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
-    Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemory());
+    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
+    Assert.assertEquals(2 * GB, report_nm1.getAvailableResource().getMemorySize());

     // add request for containers
     am1.addRequests(new String[] { "127.0.0.1", "127.0.0.2" }, 2 * GB, 1, 1);
@@ -1116,17 +1116,17 @@ public class TestFifoScheduler {
     List allocated1 = alloc1Response.getAllocatedContainers();
     Assert.assertEquals(1, allocated1.size());
-    Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemory());
+    Assert.assertEquals(2 * GB, allocated1.get(0).getResource().getMemorySize());
     Assert.assertEquals(nm1.getNodeId(), allocated1.get(0).getNodeId());

     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
     // check node report, 4 GB used and 0 GB available
-    Assert.assertEquals(0, report_nm1.getAvailableResource().getMemory());
-    Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(0, report_nm1.getAvailableResource().getMemorySize());
+    Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());

     // check container is assigned with 2 GB.
     Container c1 = allocated1.get(0);
-    Assert.assertEquals(2 * GB, c1.getResource().getMemory());
+    Assert.assertEquals(2 * GB, c1.getResource().getMemorySize());

     // update node resource to 2 GB, so resource is over-consumed.
     Map nodeResourceMap =
@@ -1141,7 +1141,7 @@ public class TestFifoScheduler {
     while (waitCount++ != 20) {
       report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
       if (null != report_nm1 &&
-          report_nm1.getAvailableResource().getMemory() != 0) {
+          report_nm1.getAvailableResource().getMemorySize() != 0) {
         break;
       }
       LOG.info("Waiting for RMNodeResourceUpdateEvent to be handled... Tried "
@@ -1150,8 +1150,9 @@ public class TestFifoScheduler {
     }
     // Now, the used resource is still 4 GB, and available resource is minus
     // value.
-    Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemory());
-    Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemory());
+    report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
+    Assert.assertEquals(4 * GB, report_nm1.getUsedResource().getMemorySize());
+    Assert.assertEquals(-2 * GB, report_nm1.getAvailableResource().getMemorySize());

     // Check container can complete successfully in case of resource
     // over-commitment.
@@ -1169,9 +1170,9 @@ public class TestFifoScheduler {
     Assert.assertEquals(1, am1.schedule().getCompletedContainersStatuses()
         .size());
     report_nm1 = rm.getResourceScheduler().getNodeReport(nm1.getNodeId());
-    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemory());
+    Assert.assertEquals(2 * GB, report_nm1.getUsedResource().getMemorySize());
     // As container return 2 GB back, the available resource becomes 0 again.
-    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemory());
+    Assert.assertEquals(0 * GB, report_nm1.getAvailableResource().getMemorySize());

     rm.stop();
   }
@@ -1248,30 +1249,30 @@ public class TestFifoScheduler {
     Resource usedResource = resourceManager.getResourceScheduler()
         .getSchedulerNode(nm_0.getNodeId()).getAllocatedResource();
-    Assert.assertEquals(usedResource.getMemory(), 1 * GB);
+    Assert.assertEquals(usedResource.getMemorySize(), 1 * GB);
     Assert.assertEquals(usedResource.getVirtualCores(), 1);
     // Check total resource of scheduler node is also changed to 1 GB 1 core
     Resource totalResource = resourceManager.getResourceScheduler()
         .getSchedulerNode(nm_0.getNodeId()).getTotalResource();
-    Assert.assertEquals(totalResource.getMemory(), 1 * GB);
+    Assert.assertEquals(totalResource.getMemorySize(), 1 * GB);
     Assert.assertEquals(totalResource.getVirtualCores(), 1);
     // Check the available resource is 0/0
     Resource availableResource = resourceManager.getResourceScheduler()
         .getSchedulerNode(nm_0.getNodeId()).getUnallocatedResource();
-    Assert.assertEquals(availableResource.getMemory(), 0);
+    Assert.assertEquals(availableResource.getMemorySize(), 0);
     Assert.assertEquals(availableResource.getVirtualCores(), 0);
   }

   private void checkApplicationResourceUsage(int expected,
       Application application) {
-    Assert.assertEquals(expected, application.getUsedResources().getMemory());
+    Assert.assertEquals(expected, application.getUsedResources().getMemorySize());
   }

   private void checkNodeResourceUsage(int expected,
       org.apache.hadoop.yarn.server.resourcemanager.NodeManager node) {
-    Assert.assertEquals(expected, node.getUsed().getMemory());
+    Assert.assertEquals(expected, node.getUsed().getMemorySize());
     node.checkResourceUsage();
   }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
index d2efd2045b3..9ba5233f6d1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
@@ -1410,7 +1410,7 @@ public class TestRMWebServicesApps extends JerseyTestBase {
     assertEquals("clusterUsagePerc doesn't match", 50.0f, clusterUsagePerc, 0.01f);
     assertEquals("numContainers doesn't match", 1, numContainers);
     assertEquals("preemptedResourceMB doesn't match", app
-        .getRMAppMetrics().getResourcePreempted().getMemory(),
+        .getRMAppMetrics().getResourcePreempted().getMemorySize(),
         preemptedResourceMB);
     assertEquals("preemptedResourceVCores doesn't match", app
         .getRMAppMetrics().getResourcePreempted().getVirtualCores(),
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
index 50d4e0460ec..5bcb0e02bcc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
@@ -778,9 +778,9 @@ public class TestRMWebServicesNodes extends JerseyTestBase {
     assertEquals("numContainers doesn't match: " + numContainers,
         report.getNumContainers(), numContainers);
     assertEquals("usedMemoryMB doesn't match: " + usedMemoryMB, report
-        .getUsedResource().getMemory(), usedMemoryMB);
+        .getUsedResource().getMemorySize(), usedMemoryMB);
     assertEquals("availMemoryMB doesn't match: " + availMemoryMB, report
-        .getAvailableResource().getMemory(), availMemoryMB);
+        .getAvailableResource().getMemorySize(), availMemoryMB);
     assertEquals("usedVirtualCores doesn't match: " + usedVirtualCores, report
         .getUsedResource().getVirtualCores(), usedVirtualCores);
     assertEquals("availVirtualCores doesn't match: " + availVirtualCores, report