YARN-408. Change CapacityScheduler to not disable delay-scheduling by default. Contributed by Mayank Bansal.

svn merge --ignore-ancestry -c 1550245 ../../trunk/


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1550246 13f79535-47bb-0310-9956-ffa450edef68
Author: Vinod Kumar Vavilapalli
Date:   2013-12-11 20:15:47 +00:00
parent 8e416ea685
commit 05c923a455
4 changed files with 17 additions and 7 deletions
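For readers picking up this change: it enables delay scheduling in the CapacityScheduler by default, switching yarn.scheduler.capacity.node-locality-delay from -1 to 40. The sketch below is not part of the commit; assuming the MockRM test harness and YarnConfiguration are on the classpath, it shows how the old behaviour can still be restored by overriding the property back to -1, exactly as the TestResourceManager diff further down does. The class name NodeLocalityDelayOverride is made up for illustration.

// Illustrative sketch only (not from the commit): disable delay scheduling again
// by overriding the property, as the updated testAppOnMultiNode() does.
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.resourcemanager.MockRM;

public class NodeLocalityDelayOverride {
  public static void main(String[] args) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    // -1 restores the pre-YARN-408 default: no delay scheduling.
    conf.set("yarn.scheduler.capacity.node-locality-delay", "-1");
    MockRM rm = new MockRM(conf);   // MockRM comes from the resourcemanager test jar
    rm.start();
    // ... register MockNMs and submit applications as in the test below ...
    rm.stop();
  }
}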

View File

@@ -141,6 +141,9 @@ Release 2.4.0 - UNRELEASED
     YARN-1491. Upgrade JUnit3 TestCase to JUnit 4 (Chen He via jeagles)
 
+    YARN-408. Change CapacityScheduler to not disable delay-scheduling by default.
+    (Mayank Bansal via vinodkv)
+
   OPTIMIZATIONS
 
   BUG FIXES

View File

@@ -99,12 +99,12 @@
 
   <property>
     <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>-1</value>
+    <value>40</value>
     <description>
       Number of missed scheduling opportunities after which the CapacityScheduler
       attempts to schedule rack-local containers.
-      Typically this should be set to number of racks in the cluster, this
-      feature is disabled by default, set to -1.
+      Typically this should be set to number of nodes in the cluster, By default is setting
+      approximately number of nodes in one rack which is 40.
     </description>
   </property>
 

View File

@@ -22,6 +22,8 @@
 import java.util.HashMap;
 import java.util.List;
 
+import javax.security.auth.login.Configuration;
+
 import junit.framework.Assert;
 
 import org.apache.commons.logging.Log;
@@ -88,7 +90,9 @@ public void testAppWithNoContainers() throws Exception {
   public void testAppOnMultiNode() throws Exception {
     Logger rootLogger = LogManager.getRootLogger();
     rootLogger.setLevel(Level.DEBUG);
-    MockRM rm = new MockRM();
+    YarnConfiguration conf = new YarnConfiguration();
+    conf.set("yarn.scheduler.capacity.node-locality-delay", "-1");
+    MockRM rm = new MockRM(conf);
     rm.start();
     MockNM nm1 = rm.registerNode("h1:1234", 5120);
     MockNM nm2 = rm.registerNode("h2:5678", 10240);

View File

@@ -1066,6 +1066,9 @@ public void testStolenReservedContainer() throws Exception {
     assertEquals(2*GB, a.getMetrics().getAllocatedMB());
 
     // node_1 heartbeats in and gets the DEFAULT_RACK request for app_1
+    // We do not need locality delay here
+    doReturn(-1).when(a).getNodeLocalityDelay();
+
     a.assignContainers(clusterResource, node_1);
     assertEquals(10*GB, a.getUsedResources().getMemory());
     assertEquals(2*GB, app_0.getCurrentConsumption().getMemory());
@@ -1649,7 +1652,7 @@ public void testNodeLocalityAfterQueueRefresh() throws Exception {
     LeafQueue e = stubLeafQueue((LeafQueue)queues.get(E));
 
     // before reinitialization
-    assertEquals(0, e.getNodeLocalityDelay());
+    assertEquals(40, e.getNodeLocalityDelay());
 
     csConf.setInt(CapacitySchedulerConfiguration
         .NODE_LOCALITY_DELAY, 60);
@@ -1932,10 +1935,10 @@ public void testLocalityConstraints() throws Exception {
 
     // Now, should allocate since RR(rack_1) = relax: true
     a.assignContainers(clusterResource, node_1_1);
-    verify(app_0).allocate(eq(NodeType.RACK_LOCAL), eq(node_1_1),
+    verify(app_0,never()).allocate(eq(NodeType.RACK_LOCAL), eq(node_1_1),
         any(Priority.class), any(ResourceRequest.class), any(Container.class));
     assertEquals(0, app_0.getSchedulingOpportunities(priority));
-    assertEquals(0, app_0.getTotalRequiredResources(priority));
+    assertEquals(1, app_0.getTotalRequiredResources(priority));
 
     // Now sanity-check node_local
     app_0_requests_0.add(