YARN-5575. Many classes use bare yarn. properties instead of the defined constants. Contributed by Daniel Templeton.
(cherry picked from commit d3bb69a667)
parent a071e33a91
commit e29d0a2fd8
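The pattern is the same in every file below: a configuration key that was hard-coded as a string literal is replaced by the constant already defined for it (in YarnConfiguration, MRJobConfig, CapacitySchedulerConfiguration, FairSchedulerConfiguration, DynamicResourceConfiguration, or a statically imported PREFIX). A minimal sketch of the before/after, using the RM address key from the TestMRCJCSocketFactory hunk; the example class and the address value are illustrative only and not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Hypothetical example class, not part of this commit.
public class BarePropertyExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();

    // Before: the raw key string is repeated wherever it is needed.
    conf.set("yarn.resourcemanager.address", "localhost:8032");

    // After: the defined constant replaces the bare string, so a typo fails
    // at compile time and a renamed key only has to change in one place.
    conf.set(YarnConfiguration.RM_ADDRESS, "localhost:8032");

    System.out.println(conf.get(YarnConfiguration.RM_ADDRESS));
  }
}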
@@ -33,6 +33,7 @@ import org.apache.hadoop.mapred.JobStatus;
 import org.apache.hadoop.mapreduce.MRConfig;
 import org.apache.hadoop.mapreduce.v2.MiniMRYarnCluster;
 import org.apache.hadoop.net.StandardSocketFactory;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.junit.Assert;
 import org.junit.Test;

@@ -87,9 +88,9 @@ public class TestMRCJCSocketFactory {
     jconf.set("hadoop.rpc.socket.factory.class.default",
         "org.apache.hadoop.ipc.DummySocketFactory");
     jconf.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
-    String rmAddress = jconf.get("yarn.resourcemanager.address");
+    String rmAddress = jconf.get(YarnConfiguration.RM_ADDRESS);
     String[] split = rmAddress.split(":");
-    jconf.set("yarn.resourcemanager.address", split[0] + ':'
+    jconf.set(YarnConfiguration.RM_ADDRESS, split[0] + ':'
         + (Integer.parseInt(split[1]) + 10));
     client = new JobClient(jconf);

@@ -163,7 +163,7 @@ public class TestMRJobs {
     Configuration conf = new Configuration();
     conf.set("fs.defaultFS", remoteFs.getUri().toString()); // use HDFS
     conf.set(MRJobConfig.MR_AM_STAGING_DIR, "/apps_staging_dir");
-    conf.setInt("yarn.cluster.max-application-priority", 10);
+    conf.setInt(YarnConfiguration.MAX_CLUSTER_LEVEL_APPLICATION_PRIORITY, 10);
     mrCluster.init(conf);
     mrCluster.start();
   }

@@ -418,8 +418,7 @@ public class TestMRJobs {
     // set master address to local to test that local mode applied if framework
     // equals local
     sleepConf.set(MRConfig.MASTER_ADDRESS, "local");
-    sleepConf
-        .setInt("yarn.app.mapreduce.am.scheduler.heartbeat.interval-ms", 5);
+    sleepConf.setInt(MRJobConfig.MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS, 5);

     SleepJob sleepJob = new SleepJob();
     sleepJob.setConf(sleepConf);

@@ -11,6 +11,7 @@ import org.apache.hadoop.mapred.MiniMRClientClusterFactory;
 import org.apache.hadoop.mapreduce.MRJobConfig;
 import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
 import org.apache.hadoop.conf.Configuration;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;

 import java.io.IOException;

@@ -45,8 +46,8 @@ public class GridmixTestUtils {
     Configuration conf = new Configuration();
     // conf.set("mapred.queue.names", "default,q1,q2");
     conf.set("mapred.queue.names", "default");
-    conf.set("yarn.scheduler.capacity.root.queues", "default");
-    conf.set("yarn.scheduler.capacity.root.default.capacity", "100.0");
+    conf.set(PREFIX + "root.queues", "default");
+    conf.set(PREFIX + "root.default.capacity", "100.0");


     conf.setBoolean(GRIDMIX_USE_QUEUE_IN_TRACE, false);

@@ -172,7 +172,8 @@ public class TestDistributedShell {
       throw new RuntimeException("Could not find 'yarn-site.xml' dummy file in classpath");
     }
     Configuration yarnClusterConfig = yarnCluster.getConfig();
-    yarnClusterConfig.set("yarn.application.classpath", new File(url.getPath()).getParent());
+    yarnClusterConfig.set(YarnConfiguration.YARN_APPLICATION_CLASSPATH,
+        new File(url.getPath()).getParent());
     //write the document to a buffer (not directly to the file, as that
     //can cause the file being written to get read -which will then fail.
     ByteArrayOutputStream bytesOut = new ByteArrayOutputStream();

@@ -186,8 +187,7 @@ public class TestDistributedShell {
     FileContext fsContext = FileContext.getLocalFSFileContext();
     fsContext
         .delete(
-            new Path(conf
-                .get("yarn.timeline-service.leveldb-timeline-store.path")),
+            new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH)),
             true);
     try {
       Thread.sleep(2000);

@@ -215,8 +215,7 @@ public class TestDistributedShell {
     FileContext fsContext = FileContext.getLocalFSFileContext();
     fsContext
         .delete(
-            new Path(conf
-                .get("yarn.timeline-service.leveldb-timeline-store.path")),
+            new Path(conf.get(YarnConfiguration.TIMELINE_SERVICE_LEVELDB_PATH)),
             true);
   }

@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
 import org.apache.hadoop.yarn.server.resourcemanager.nodelabels.RMNodeLabelsManager;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

@@ -57,13 +58,12 @@ public class TestDistributedShellWithNodeLabels {
     labelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(labels);

     // Setup queue access to node labels
-    distShellTest.conf.set("yarn.scheduler.capacity.root.accessible-node-labels", "x");
-    distShellTest.conf.set("yarn.scheduler.capacity.root.accessible-node-labels.x.capacity",
-        "100");
-    distShellTest.conf.set("yarn.scheduler.capacity.root.default.accessible-node-labels", "x");
-    distShellTest.conf.set(
-        "yarn.scheduler.capacity.root.default.accessible-node-labels.x.capacity",
+    distShellTest.conf.set(PREFIX + "root.accessible-node-labels", "x");
+    distShellTest.conf.set(PREFIX + "root.accessible-node-labels.x.capacity",
         "100");
+    distShellTest.conf.set(PREFIX + "root.default.accessible-node-labels", "x");
+    distShellTest.conf.set(PREFIX
+        + "root.default.accessible-node-labels.x.capacity", "100");

     rmContext.getScheduler().reinitialize(distShellTest.conf, rmContext);

@@ -80,6 +80,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.Capacity
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.Times;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;

@@ -1704,8 +1705,7 @@ public class TestYarnCLI {
         "org.apache.hadoop.yarn.server.resourcemanager.monitor.capacity."
             + "ProportionalCapacityPreemptionPolicy");
     conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
-    conf.setBoolean(
-        "yarn.scheduler.capacity.root.a.a1.disable_preemption", true);
+    conf.setBoolean(PREFIX + "root.a.a1.disable_preemption", true);
     MiniYARNCluster cluster =
         new MiniYARNCluster("testReservationAPIs", 2, 1, 1);

@@ -23,6 +23,7 @@ import org.junit.Assert;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.http.FilterContainer;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import static org.apache.hadoop.yarn.server.timeline.security.TimelineAuthenticationFilterInitializer.PREFIX;
 import org.junit.Test;
 import org.mockito.Mockito;

@@ -35,36 +36,30 @@ public class TestTimelineAuthenticationFilterInitializer {
     for (int i = 0; i < 3; ++i) {
       Configuration conf = new YarnConfiguration();
       switch (i) {
       case 0:
         // hadoop.proxyuser prefix
         conf.set("hadoop.proxyuser.foo.hosts", "*");
         conf.set("hadoop.proxyuser.foo.users", "*");
         conf.set("hadoop.proxyuser.foo.groups", "*");
         break;
       case 1:
         // yarn.timeline-service.http-authentication.proxyuser prefix
-        conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.hosts",
-            "*");
-        conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.users",
-            "*");
-        conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.groups",
-            "*");
-        break;
-      case 2:
-        // hadoop.proxyuser prefix has been overwritten by
-        // yarn.timeline-service.http-authentication.proxyuser prefix
-        conf.set("hadoop.proxyuser.foo.hosts", "bar");
-        conf.set("hadoop.proxyuser.foo.users", "bar");
-        conf.set("hadoop.proxyuser.foo.groups", "bar");
-        conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.hosts",
-            "*");
-        conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.users",
-            "*");
-        conf.set("yarn.timeline-service.http-authentication.proxyuser.foo.groups",
-            "*");
-        break;
-      default:
-        break;
+        conf.set(PREFIX + "proxyuser.foo.hosts", "*");
+        conf.set(PREFIX + "proxyuser.foo.users", "*");
+        conf.set(PREFIX + "proxyuser.foo.groups", "*");
+        break;
+      case 2:
+        // hadoop.proxyuser prefix has been overwritten by
+        // yarn.timeline-service.http-authentication.proxyuser prefix
+        conf.set("hadoop.proxyuser.foo.hosts", "bar");
+        conf.set("hadoop.proxyuser.foo.users", "bar");
+        conf.set("hadoop.proxyuser.foo.groups", "bar");
+        conf.set(PREFIX + "proxyuser.foo.hosts", "*");
+        conf.set(PREFIX + "proxyuser.foo.users", "*");
+        conf.set(PREFIX + "proxyuser.foo.groups", "*");
+        break;
+      default:
+        break;
       }

       TimelineAuthenticationFilterInitializer initializer =

@@ -549,8 +549,8 @@ public class ReservationACLsTestBase extends ACLsTestBase {
     csConf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
     csConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
     csConf.setBoolean(YarnConfiguration.YARN_RESERVATION_ACL_ENABLE, true);
-    csConf.set("yarn.resourcemanager.scheduler.class", CapacityScheduler
-        .class.getName());
+    csConf.set(YarnConfiguration.RM_SCHEDULER,
+        CapacityScheduler.class.getName());

     return csConf;
   }

@@ -602,8 +602,7 @@ public class ReservationACLsTestBase extends ACLsTestBase {
     fsConf.setBoolean(YarnConfiguration.RM_RESERVATION_SYSTEM_ENABLE, true);
     fsConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
     fsConf.setBoolean(YarnConfiguration.YARN_RESERVATION_ACL_ENABLE, true);
-    fsConf.set("yarn.resourcemanager.scheduler.class", FairScheduler.class
-        .getName());
+    fsConf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());

     return fsConf;
   }

@@ -79,6 +79,7 @@ import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
 import org.apache.hadoop.yarn.util.Records;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;

@@ -286,12 +287,11 @@ public class TestAppManager{
     YarnConfiguration conf = new YarnConfiguration();
     conf.set(YarnConfiguration.RM_SCHEDULER,
         CapacityScheduler.class.getCanonicalName());
-    conf.set("yarn.scheduler.capacity.root.acl_submit_applications", " ");
-    conf.set("yarn.scheduler.capacity.root.acl_administer_queue", " ");
+    conf.set(PREFIX + "root.acl_submit_applications", " ");
+    conf.set(PREFIX + "root.acl_administer_queue", " ");

-    conf.set("yarn.scheduler.capacity.root.default.acl_submit_applications",
-        " ");
-    conf.set("yarn.scheduler.capacity.root.default.acl_administer_queue", " ");
+    conf.set(PREFIX + "root.default.acl_submit_applications", " ");
+    conf.set(PREFIX + "root.default.acl_administer_queue", " ");
     conf.set(YarnConfiguration.YARN_ACL_ENABLE, "true");
     MockRM mockRM = new MockRM(conf);
     ClientRMService rmService = mockRM.getClientRMService();

@@ -69,6 +69,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptE
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
+import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
 import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
 import org.apache.log4j.Level;
 import org.apache.log4j.LogManager;

@@ -136,7 +137,7 @@ public class TestRM extends ParameterizedSchedulerTestBase {
   public void testAppOnMultiNode() throws Exception {
     Logger rootLogger = LogManager.getRootLogger();
     rootLogger.setLevel(Level.DEBUG);
-    conf.set("yarn.scheduler.capacity.node-locality-delay", "-1");
+    conf.set(CapacitySchedulerConfiguration.NODE_LOCALITY_DELAY, "-1");
     MockRM rm = new MockRM(conf);
     rm.start();
     MockNM nm1 = rm.registerNode("h1:1234", 5120);

@@ -68,6 +68,9 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_PROXY_USER_PREFIX;
+import static org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration.NODES;
+import static org.apache.hadoop.yarn.server.resourcemanager.resource.DynamicResourceConfiguration.PREFIX;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;

@@ -165,7 +168,8 @@ public class TestRMAdminService {

     CapacitySchedulerConfiguration csConf =
         new CapacitySchedulerConfiguration();
-    csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
+    csConf.set(CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS,
+        "5000");
     uploadConfiguration(csConf, "capacity-scheduler.xml");

     rm.adminService.refreshQueues(RefreshQueuesRequest.newInstance());

@@ -226,9 +230,9 @@ public class TestRMAdminService {

     DynamicResourceConfiguration drConf =
         new DynamicResourceConfiguration();
-    drConf.set("yarn.resource.dynamic.nodes", "h1:1234");
-    drConf.set("yarn.resource.dynamic.h1:1234.vcores", "4");
-    drConf.set("yarn.resource.dynamic.h1:1234.memory", "4096");
+    drConf.set(PREFIX + NODES, "h1:1234");
+    drConf.set(PREFIX + "h1:1234.vcores", "4");
+    drConf.set(PREFIX + "h1:1234.memory", "4096");
     uploadConfiguration(drConf, "dynamic-resources.xml");

     rm.adminService.refreshNodesResources(

@@ -266,9 +270,9 @@ public class TestRMAdminService {

     DynamicResourceConfiguration drConf =
         new DynamicResourceConfiguration();
-    drConf.set("yarn.resource.dynamic.nodes", "h1:1234");
-    drConf.set("yarn.resource.dynamic.h1:1234.vcores", "4");
-    drConf.set("yarn.resource.dynamic.h1:1234.memory", "4096");
+    drConf.set(PREFIX + NODES, "h1:1234");
+    drConf.set(PREFIX + "h1:1234.vcores", "4");
+    drConf.set(PREFIX + "h1:1234.memory", "4096");
     uploadConfiguration(drConf, "dynamic-resources.xml");

     rm.adminService.refreshNodesResources(

@@ -316,9 +320,9 @@ public class TestRMAdminService {

     DynamicResourceConfiguration drConf =
         new DynamicResourceConfiguration();
-    drConf.set("yarn.resource.dynamic.nodes", "h1:1234");
-    drConf.set("yarn.resource.dynamic.h1:1234.vcores", "4");
-    drConf.set("yarn.resource.dynamic.h1:1234.memory", "4096");
+    drConf.set(PREFIX + NODES, "h1:1234");
+    drConf.set(PREFIX + "h1:1234.vcores", "4");
+    drConf.set(PREFIX + "h1:1234.memory", "4096");
     uploadConfiguration(drConf, "dynamic-resources.xml");

     rm.adminService.refreshNodesResources(

@@ -364,9 +368,9 @@ public class TestRMAdminService {

     DynamicResourceConfiguration drConf =
         new DynamicResourceConfiguration();
-    drConf.set("yarn.resource.dynamic.nodes", "h1:1234");
-    drConf.set("yarn.resource.dynamic.h1:1234.vcores", "4");
-    drConf.set("yarn.resource.dynamic.h1:1234.memory", "4096");
+    drConf.set(PREFIX + NODES, "h1:1234");
+    drConf.set(PREFIX + "h1:1234.vcores", "4");
+    drConf.set(PREFIX + "h1:1234.memory", "4096");
     uploadConfiguration(drConf, "dynamic-resources.xml");

     rm.adminService.refreshNodesResources(

@@ -611,8 +615,8 @@ public class TestRMAdminService {
         .get("hadoop.proxyuser.test.hosts").contains("test_hosts"));

     Configuration yarnConf = new Configuration(false);
-    yarnConf.set("yarn.resourcemanager.proxyuser.test.groups", "test_groups_1");
-    yarnConf.set("yarn.resourcemanager.proxyuser.test.hosts", "test_hosts_1");
+    yarnConf.set(RM_PROXY_USER_PREFIX + "test.groups", "test_groups_1");
+    yarnConf.set(RM_PROXY_USER_PREFIX + "test.hosts", "test_hosts_1");
     uploadConfiguration(yarnConf, "yarn-site.xml");

     // RM specific configs will overwrite the common ones

@@ -809,7 +813,8 @@ public class TestRMAdminService {

     CapacitySchedulerConfiguration csConf =
         new CapacitySchedulerConfiguration();
-    csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
+    csConf.set(CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS,
+        "5000");
     uploadConfiguration(csConf, "capacity-scheduler.xml");

     rm1.adminService.refreshQueues(RefreshQueuesRequest.newInstance());

@@ -897,7 +902,8 @@ public class TestRMAdminService {

     CapacitySchedulerConfiguration csConf =
         new CapacitySchedulerConfiguration();
-    csConf.set("yarn.scheduler.capacity.maximum-applications", "5000");
+    csConf.set(CapacitySchedulerConfiguration.MAXIMUM_SYSTEM_APPLICATIONS,
+        "5000");
     uploadConfiguration(csConf, "capacity-scheduler.xml");

     String aclsString = "alice,bob users,wheel";

@@ -26,6 +26,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_PROXY_USER_PREFIX;
 import org.junit.Assert;
 import org.junit.Test;
 import org.junit.runner.RunWith;

@@ -50,30 +51,30 @@ public class TestRMProxyUsersConf {
   public TestRMProxyUsersConf(int round) {
     conf = new YarnConfiguration();
     switch (round) {
     case 0:
       // hadoop.proxyuser prefix
       conf.set("hadoop.proxyuser.foo.hosts", ipAddress);
       conf.set("hadoop.proxyuser.foo.users", "bar");
       conf.set("hadoop.proxyuser.foo.groups", "bar_group");
       break;
     case 1:
       // yarn.resourcemanager.proxyuser prefix
-      conf.set("yarn.resourcemanager.proxyuser.foo.hosts", ipAddress);
-      conf.set("yarn.resourcemanager.proxyuser.foo.users", "bar");
-      conf.set("yarn.resourcemanager.proxyuser.foo.groups", "bar_group");
+      conf.set(RM_PROXY_USER_PREFIX + "foo.hosts", ipAddress);
+      conf.set(RM_PROXY_USER_PREFIX + "foo.users", "bar");
+      conf.set(RM_PROXY_USER_PREFIX + "foo.groups", "bar_group");
       break;
     case 2:
       // hadoop.proxyuser prefix has been overwritten by
       // yarn.resourcemanager.proxyuser prefix
       conf.set("hadoop.proxyuser.foo.hosts", "xyz");
       conf.set("hadoop.proxyuser.foo.users", "xyz");
       conf.set("hadoop.proxyuser.foo.groups", "xyz");
-      conf.set("yarn.resourcemanager.proxyuser.foo.hosts", ipAddress);
-      conf.set("yarn.resourcemanager.proxyuser.foo.users", "bar");
-      conf.set("yarn.resourcemanager.proxyuser.foo.groups", "bar_group");
+      conf.set(RM_PROXY_USER_PREFIX + "foo.hosts", ipAddress);
+      conf.set(RM_PROXY_USER_PREFIX + "foo.users", "bar");
+      conf.set(RM_PROXY_USER_PREFIX + "foo.groups", "bar_group");
       break;
     default:
       break;
     }
   }

@@ -94,6 +94,7 @@ import java.util.List;
 import java.util.Map;
 import java.util.Set;
 import java.util.concurrent.TimeUnit;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;

 import static org.junit.Assert.*;
 import static org.mockito.Mockito.mock;

@@ -644,7 +645,7 @@ public class TestWorkPreservingRMRestart extends ParameterizedSchedulerTestBase
     rm1.clearQueueMetrics(app1_2);
     rm1.clearQueueMetrics(app2);

-    csConf.set("yarn.scheduler.capacity.root.Default.QueueB.state", "STOPPED");
+    csConf.set(PREFIX + "root.Default.QueueB.state", "STOPPED");

     // Re-start RM
     rm2 = new MockRM(csConf, memStore);

@@ -389,10 +389,9 @@ public class TestRMApplicationHistoryWriter {
     YarnConfiguration conf = new YarnConfiguration();
     if (isFS) {
       conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, true);
-      conf.set("yarn.resourcemanager.scheduler.class",
-          FairScheduler.class.getName());
+      conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
     } else {
-      conf.set("yarn.resourcemanager.scheduler.class",
+      conf.set(YarnConfiguration.RM_SCHEDULER,
           CapacityScheduler.class.getName());
     }
     // don't process history events

@@ -157,8 +157,7 @@ public class TestProportionalCapacityPreemptionPolicy {
     conf.setBoolean(YarnConfiguration.RM_SCHEDULER_ENABLE_MONITORS, true);
     // FairScheduler doesn't support this test,
     // Set CapacityScheduler as the scheduler for this test.
-    conf.set("yarn.resourcemanager.scheduler.class",
-        CapacityScheduler.class.getName());
+    conf.set(YarnConfiguration.RM_SCHEDULER, CapacityScheduler.class.getName());

     mClock = mock(Clock.class);
     mCS = mock(CapacityScheduler.class);

@@ -60,6 +60,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSe
 import org.apache.hadoop.yarn.util.resource.DefaultResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.ResourceCalculator;
 import org.apache.hadoop.yarn.util.resource.Resources;
+import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;

@@ -339,11 +340,8 @@ public class TestApplicationLimits {
     );

     // Change the per-queue max AM resources percentage.
-    csConf.setFloat(
-        "yarn.scheduler.capacity." +
-        queue.getQueuePath() +
-        ".maximum-am-resource-percent",
-        0.5f);
+    csConf.setFloat(PREFIX + queue.getQueuePath()
+        + ".maximum-am-resource-percent", 0.5f);
     // Re-create queues to get new configs.
     queues = new HashMap<String, CSQueue>();
     root =

@@ -364,10 +362,8 @@ public class TestApplicationLimits {
         Resource.newInstance(400*GB, 1));

     // Change the per-queue max applications.
-    csConf.setInt(
-        "yarn.scheduler.capacity." +
-        queue.getQueuePath() +
-        ".maximum-applications", 9999);
+    csConf.setInt(PREFIX + queue.getQueuePath() + ".maximum-applications",
+        9999);
     // Re-create queues to get new configs.
     queues = new HashMap<String, CSQueue>();
     root =

@@ -66,7 +66,8 @@ public class TestCapacitySchedulerQueueACLs extends QueueACLsTestBase {
     csConf.setAcls(CapacitySchedulerConfiguration.ROOT, aclsOnRootQueue);

     csConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
-    csConf.set("yarn.resourcemanager.scheduler.class", CapacityScheduler.class.getName());
+    csConf.set(YarnConfiguration.RM_SCHEDULER,
+        CapacityScheduler.class.getName());

     return csConf;
   }

@@ -154,9 +154,9 @@ public class TestLeafQueue {

     csConf =
         new CapacitySchedulerConfiguration();
-    csConf.setBoolean("yarn.scheduler.capacity.user-metrics.enable", true);
-    csConf.setBoolean(
-        "yarn.scheduler.capacity.reservations-continue-look-all-nodes", false);
+    csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
+    csConf.setBoolean(CapacitySchedulerConfiguration.RESERVE_CONT_LOOK_ALL_NODES,
+        false);
     final String newRoot = "root" + System.currentTimeMillis();
     setupQueueConfiguration(csConf, newRoot);
     YarnConfiguration conf = new YarnConfiguration();

@@ -112,7 +112,7 @@ public class TestReservations {
   private void setup(CapacitySchedulerConfiguration csConf,
       boolean addUserLimits) throws Exception {

-    csConf.setBoolean("yarn.scheduler.capacity.user-metrics.enable", true);
+    csConf.setBoolean(CapacitySchedulerConfiguration.ENABLE_USER_METRICS, true);
     final String newRoot = "root" + System.currentTimeMillis();
     // final String newRoot = "root";

@@ -502,8 +502,8 @@ public class TestReservations {
     queues = new HashMap<String, CSQueue>();
     // test that the deadlock occurs when turned off
     CapacitySchedulerConfiguration csConf = new CapacitySchedulerConfiguration();
-    csConf.setBoolean(
-        "yarn.scheduler.capacity.reservations-continue-look-all-nodes", false);
+    csConf.setBoolean(CapacitySchedulerConfiguration.RESERVE_CONT_LOOK_ALL_NODES,
+        false);
     setup(csConf);

     // Manipulate queue 'a'

@@ -45,7 +45,7 @@ public class TestFairSchedulerEventLog {
     Configuration conf = new YarnConfiguration();
     conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
         ResourceScheduler.class);
-    conf.set("yarn.scheduler.fair.event-log-enabled", "true");
+    conf.set(FairSchedulerConfiguration.EVENT_LOG_ENABLED, "true");

     // All tests assume only one assignment per node update
     conf.set(FairSchedulerConfiguration.ASSIGN_MULTIPLE, "false");

@@ -55,7 +55,7 @@ public class TestFairSchedulerQueueACLs extends QueueACLsTestBase {
     fsConf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);

     fsConf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
-    fsConf.set("yarn.resourcemanager.scheduler.class", FairScheduler.class.getName());
+    fsConf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());

     return fsConf;
   }

@@ -197,7 +197,7 @@ public class TestRMWebServicesAppsModification extends JerseyTestBase {
   private class CapTestServletModule extends TestServletModule {
     @Override
     public void configureScheduler() {
-      conf.set("yarn.resourcemanager.scheduler.class",
+      conf.set(YarnConfiguration.RM_SCHEDULER,
           CapacityScheduler.class.getName());
     }
   }

@@ -223,8 +223,7 @@ public class TestRMWebServicesAppsModification extends JerseyTestBase {
     } catch(IOException e) {
     }
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, FS_ALLOC_FILE);
-    conf.set("yarn.resourcemanager.scheduler.class",
-        FairScheduler.class.getName());
+    conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
     }
   }

@@ -58,7 +58,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fifo.FifoScheduler;
 import org.apache.hadoop.yarn.server.resourcemanager.webapp.dao.ApplicationSubmissionContextInfo;
-import org.apache.hadoop.yarn.util.ConverterUtils;
 import org.codehaus.jettison.json.JSONObject;
 import org.junit.AfterClass;
 import org.junit.Assert;

@@ -66,6 +65,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;

 import com.sun.jersey.api.client.ClientResponse.Status;
+import static org.apache.hadoop.yarn.conf.YarnConfiguration.RM_PROXY_USER_PREFIX;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;

@@ -159,8 +159,8 @@ public class TestRMWebServicesDelegationTokenAuthentication {
     rmconf.set(YarnConfiguration.NM_WEBAPP_SPNEGO_KEYTAB_FILE_KEY,
         httpSpnegoKeytabFile.getAbsolutePath());
     rmconf.setBoolean("mockrm.webapp.enabled", true);
-    rmconf.set("yarn.resourcemanager.proxyuser.client.hosts", "*");
-    rmconf.set("yarn.resourcemanager.proxyuser.client.groups", "*");
+    rmconf.set(RM_PROXY_USER_PREFIX + "client.hosts", "*");
+    rmconf.set(RM_PROXY_USER_PREFIX + "client.groups", "*");
     UserGroupInformation.setConfiguration(rmconf);
     rm = new MockRM(rmconf);
     rm.start();

@@ -104,8 +104,8 @@ public class TestRMWebServicesHttpStaticUserPermissions {
     rmconf.setBoolean(YarnConfiguration.YARN_ACL_ENABLE, true);
     rmconf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
         "kerberos");
-    rmconf.set("yarn.resourcemanager.principal", spnegoPrincipal);
-    rmconf.set("yarn.resourcemanager.keytab",
+    rmconf.set(YarnConfiguration.RM_PRINCIPAL, spnegoPrincipal);
+    rmconf.set(YarnConfiguration.RM_KEYTAB,
         spnegoKeytabFile.getAbsolutePath());
     rmconf.setBoolean("mockrm.webapp.enabled", true);
     UserGroupInformation.setConfiguration(rmconf);

@@ -178,7 +178,7 @@ public class TestRMWebServicesReservation extends JerseyTestBase {
   private class CapTestServletModule extends TestServletModule {
     @Override
     public void configureScheduler() {
-      conf.set("yarn.resourcemanager.scheduler.class",
+      conf.set(YarnConfiguration.RM_SCHEDULER,
           CapacityScheduler.class.getName());
     }
   }

@@ -204,8 +204,7 @@ public class TestRMWebServicesReservation extends JerseyTestBase {
     } catch (IOException e) {
     }
     conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, FS_ALLOC_FILE);
-    conf.set("yarn.resourcemanager.scheduler.class",
-        FairScheduler.class.getName());
+    conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
     }
   }

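Several hunks above also rely on a statically imported PREFIX constant and concatenate only the key-specific suffix onto it; for example, the GridmixTestUtils hunk rewrites "yarn.scheduler.capacity.root.queues" as PREFIX + "root.queues", which shows that CapacitySchedulerConfiguration.PREFIX carries the "yarn.scheduler.capacity." prefix. A small sketch of that idiom; the example class and the queue value are illustrative only and not part of this commit:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

import static org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacitySchedulerConfiguration.PREFIX;

// Hypothetical example class, not part of this commit.
public class PrefixConstantExample {
  public static void main(String[] args) {
    Configuration conf = new YarnConfiguration();

    // Before: the full "yarn.scheduler.capacity." prefix is spelled out by hand.
    conf.set("yarn.scheduler.capacity.root.queues", "default");

    // After: only the queue-specific suffix remains a literal; the prefix
    // comes from the statically imported constant.
    conf.set(PREFIX + "root.queues", "default");

    System.out.println(conf.get(PREFIX + "root.queues"));
  }
}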