diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
index 037cebf1734..43a47ae65fe 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java
@@ -95,6 +95,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSe
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.DominantResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
 import org.apache.hadoop.yarn.util.resource.Resources;
 
 import java.io.IOException;
@@ -1421,7 +1422,7 @@ public class FairScheduler extends
     }
 
     if (continuousSchedulingEnabled) {
-      // Contiuous scheduling is deprecated log it on startup
+      // Continuous scheduling is deprecated log it on startup
       LOG.warn("Continuous scheduling is turned ON. It is deprecated " +
           "because it can cause scheduler slowness due to locking issues. " +
          "Schedulers should use assignmultiple as a replacement.");
@@ -1534,6 +1535,12 @@
     } catch (Exception e) {
       LOG.error("Failed to reload allocations file", e);
     }
+    try {
+      refreshMaximumAllocation(
+          ResourceUtils.fetchMaximumAllocationFromConfig(conf));
+    } catch (Exception e) {
+      LOG.error("Failed to refresh maximum allocation", e);
+    }
   }
 
   @Override
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerWithMultiResourceTypes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerWithMultiResourceTypes.java
new file mode 100644
index 00000000000..f9fcf5328f0
--- /dev/null
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairSchedulerWithMultiResourceTypes.java
@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.yarn.api.protocolrecords.ResourceTypes;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.ResourceInformation;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.util.resource.ResourceUtils;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
+
+import static org.apache.hadoop.yarn.util.resource.ResourceUtils.MAXIMUM_ALLOCATION;
+import static org.apache.hadoop.yarn.util.resource.ResourceUtils.UNITS;
+import static org.junit.Assert.assertEquals;
+
+public class TestFairSchedulerWithMultiResourceTypes
+    extends FairSchedulerTestBase {
+
+  private static final String CUSTOM_RESOURCE = "custom-resource";
+
+  @Before
+  public void setUp() throws IOException {
+    scheduler = new FairScheduler();
+    conf = createConfiguration();
+    initResourceTypes(conf);
+  }
+
+  @After
+  public void tearDown() {
+    if (scheduler != null) {
+      scheduler.stop();
+      scheduler = null;
+    }
+  }
+
+  private Configuration initResourceTypes(Configuration conf) {
+    Map<String, ResourceInformation> riMap = new HashMap<>();
+
+    // Initialize mandatory resources
+    ResourceInformation memory =
+        ResourceInformation.newInstance(ResourceInformation.MEMORY_MB.getName(),
+            ResourceInformation.MEMORY_MB.getUnits(),
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_MB,
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_MB);
+    ResourceInformation vcores =
+        ResourceInformation.newInstance(ResourceInformation.VCORES.getName(),
+            ResourceInformation.VCORES.getUnits(),
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MINIMUM_ALLOCATION_VCORES,
+            YarnConfiguration.DEFAULT_RM_SCHEDULER_MAXIMUM_ALLOCATION_VCORES);
+    riMap.put(ResourceInformation.MEMORY_URI, memory);
+    riMap.put(ResourceInformation.VCORES_URI, vcores);
+    riMap.put(CUSTOM_RESOURCE, ResourceInformation.newInstance(CUSTOM_RESOURCE,
+        "", 0, ResourceTypes.COUNTABLE, 0, 3333L));
+
+    ResourceUtils.initializeResourcesFromResourceInformationMap(riMap);
+
+    return conf;
+  }
+
+  @Test
+  public void testMaximumAllocationRefresh() throws IOException {
+    conf.set(YarnConfiguration.RESOURCE_TYPES, CUSTOM_RESOURCE);
+    conf.set(YarnConfiguration.RESOURCE_TYPES + "." + CUSTOM_RESOURCE + UNITS,
+        "k");
+    conf.setInt(YarnConfiguration.RESOURCE_TYPES + "." + CUSTOM_RESOURCE
+        + MAXIMUM_ALLOCATION, 10000);
+    conf.setInt(YarnConfiguration.RESOURCE_TYPES + "."
+        + ResourceInformation.VCORES.getName() + MAXIMUM_ALLOCATION, 4);
+    conf.setInt(
+        YarnConfiguration.RESOURCE_TYPES + "."
+            + ResourceInformation.MEMORY_MB.getName() + MAXIMUM_ALLOCATION,
+        512);
+    scheduler.init(conf);
+    scheduler.reinitialize(conf, null);
+
+    Resource maxAllowedAllocation =
+        scheduler.getNodeTracker().getMaxAllowedAllocation();
+    ResourceInformation customResource =
+        maxAllowedAllocation.getResourceInformation(CUSTOM_RESOURCE);
+    assertEquals(512, maxAllowedAllocation.getMemorySize());
+    assertEquals(4, maxAllowedAllocation.getVirtualCores());
+    assertEquals(10000, customResource.getValue());
+
+    conf = new YarnConfiguration();
+    conf.set(YarnConfiguration.RESOURCE_TYPES, CUSTOM_RESOURCE);
+    conf.set(YarnConfiguration.RESOURCE_TYPES + "." + CUSTOM_RESOURCE + UNITS,
+        "k");
+    conf.setInt(YarnConfiguration.RESOURCE_TYPES + "." + CUSTOM_RESOURCE
+        + MAXIMUM_ALLOCATION, 20000);
+    conf.setInt(YarnConfiguration.RESOURCE_TYPES + "."
+        + ResourceInformation.VCORES.getName() + MAXIMUM_ALLOCATION, 8);
+    conf.setInt(
+        YarnConfiguration.RESOURCE_TYPES + "."
+            + ResourceInformation.MEMORY_MB.getName() + MAXIMUM_ALLOCATION,
+        2048);
+    scheduler.reinitialize(conf, null);
+
+    maxAllowedAllocation = scheduler.getNodeTracker().getMaxAllowedAllocation();
+    customResource =
+        maxAllowedAllocation.getResourceInformation(CUSTOM_RESOURCE);
+    assertEquals(2048, maxAllowedAllocation.getMemorySize());
+    assertEquals(8, maxAllowedAllocation.getVirtualCores());
+    assertEquals(20000, customResource.getValue());
+  }
+
+}