From df93e27dd64d6cb0c016480b0c9f41ef973f080a Mon Sep 17 00:00:00 2001
From: Arun Murthy
Date: Tue, 18 Oct 2011 01:23:14 +0000
Subject: [PATCH] Merge -c 1185447 from trunk to branch-0.23 to complete fix for MAPREDUCE-3068.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1185448 13f79535-47bb-0310-9956-ffa450edef68
---
 hadoop-mapreduce-project/CHANGES.txt          |   4 +
 .../hadoop/mapred/MapReduceChildJVM.java      |   8 +-
 .../v2/app/job/impl/TaskAttemptImpl.java      |   3 +-
 .../hadoop/mapreduce/v2/util/MRApps.java      |  64 +-----
 .../hadoop-yarn/bin/yarn-config.sh            |   4 +
 .../hadoop/yarn/api/ApplicationConstants.java |   5 +
 .../hadoop/yarn/conf/YarnConfiguration.java   |   8 +
 .../org/apache/hadoop/yarn/util/Apps.java     |  52 +++++
 .../src/main/resources/yarn-default.xml       |  12 ++
 .../launcher/ContainerLaunch.java             |  21 +-
 .../launcher/TestContainerLaunch.java         | 184 +++++++++++++++++-
 11 files changed, 294 insertions(+), 71 deletions(-)

diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index d8752d8a692..90ff0cbb335 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -349,6 +349,10 @@ Release 0.23.0 - Unreleased
     MAPREDUCE-3136. Added documentation for setting up Hadoop clusters in both
     non-secure and secure mode for both HDFS & YARN. (acmurthy)
 
+    MAPREDUCE-3068. Added a whitelist of environment variables for containers
+    from the NodeManager and set MALLOC_ARENA_MAX for all daemons and
+    containers. (Chris Riccomini via acmurthy)
+
   OPTIMIZATIONS
 
     MAPREDUCE-2026. Make JobTracker.getJobCounters() and
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
index ce6557abd03..8c80617ae6a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/MapReduceChildJVM.java
@@ -28,7 +28,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.mapred.TaskLog.LogName;
 import org.apache.hadoop.mapreduce.ID;
 import org.apache.hadoop.mapreduce.MRJobConfig;
-import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.api.ApplicationConstants;
 import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
 import org.apache.hadoop.util.StringUtils;
@@ -78,15 +78,15 @@ public class MapReduceChildJVM {
     );
 
     // Add pwd to LD_LIBRARY_PATH, add this before adding anything else
-    MRApps.addToEnvironment(
+    Apps.addToEnvironment(
         environment,
         Environment.LD_LIBRARY_PATH.name(),
         Environment.PWD.$());
 
     // Add the env variables passed by the user & admin
     String mapredChildEnv = getChildEnv(conf, task.isMapTask());
-    MRApps.setEnvFromInputString(environment, mapredChildEnv);
-    MRApps.setEnvFromInputString(
+    Apps.setEnvFromInputString(environment, mapredChildEnv);
+    Apps.setEnvFromInputString(
         environment,
         conf.get(
             MRJobConfig.MAPRED_ADMIN_USER_ENV,
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 3410b30e8e1..ee659e6a5ee 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -93,6 +93,7 @@ import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
 import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
 import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
 import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.Credentials;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -616,7 +617,7 @@ public abstract class TaskAttemptImpl implements
     serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
         ShuffleHandler.serializeServiceData(jobToken));
 
-    MRApps.addToEnvironment(
+    Apps.addToEnvironment(
         environment,
         Environment.CLASSPATH.name(),
         getInitialClasspath());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 9094da39ba3..0eee5ce8e4b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -182,17 +182,17 @@ public class MRApps extends Apps {
       reader = new BufferedReader(new InputStreamReader(classpathFileStream));
       String cp = reader.readLine();
       if (cp != null) {
-        addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
+        Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
       }
       // Put the file itself on classpath for tasks.
-      addToEnvironment(
+      Apps.addToEnvironment(
           environment,
           Environment.CLASSPATH.name(),
           thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());
 
       // Add standard Hadoop classes
       for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
-        addToEnvironment(environment, Environment.CLASSPATH.name(), c);
+        Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c);
       }
     } finally {
       if (classpathFileStream != null) {
@@ -205,28 +205,13 @@ public class MRApps extends Apps {
     // TODO: Remove duplicates.
   }
 
-  private static final String SYSTEM_PATH_SEPARATOR =
-      System.getProperty("path.separator");
-
-  public static void addToEnvironment(
-      Map<String, String> environment,
-      String variable, String value) {
-    String val = environment.get(variable);
-    if (val == null) {
-      val = value;
-    } else {
-      val = val + SYSTEM_PATH_SEPARATOR + value;
-    }
-    environment.put(variable, val);
-  }
-
   public static void setClasspath(Map<String, String> environment)
       throws IOException {
-    MRApps.addToEnvironment(
+    Apps.addToEnvironment(
         environment,
         Environment.CLASSPATH.name(),
         MRJobConfig.JOB_JAR);
-    MRApps.addToEnvironment(
+    Apps.addToEnvironment(
         environment,
         Environment.CLASSPATH.name(),
         Environment.PWD.$() + Path.SEPARATOR + "*");
@@ -355,43 +340,4 @@ public class MRApps extends Apps {
     }
     return result;
   }
-
-  public static void setEnvFromInputString(Map<String, String> env,
-      String envString) {
-    if (envString != null && envString.length() > 0) {
-      String childEnvs[] = envString.split(",");
-      for (String cEnv : childEnvs) {
-        String[] parts = cEnv.split("="); // split on '='
-        String value = env.get(parts[0]);
-
-        if (value != null) {
-          // Replace $env with the child's env constructed by NM's
-          // For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
-          value = parts[1].replace("$" + parts[0], value);
-        } else {
-          // example PATH=$PATH:/tmp
-          value = System.getenv(parts[0]);
-          if (value != null) {
-            // the env key is present in the tt's env
-            value = parts[1].replace("$" + parts[0], value);
-          } else {
-            // check for simple variable substitution
-            // for e.g. ROOT=$HOME
-            String envValue = System.getenv(parts[1].substring(1));
-            if (envValue != null) {
-              value = envValue;
-            } else {
-              // the env key is note present anywhere .. simply set it
-              // example X=$X:/tmp or X=/tmp
-              value = parts[1].replace("$" + parts[0], "");
-            }
-          }
-        }
-        addToEnvironment(env, parts[0], value);
-      }
-    }
-  }
-
-
 }
diff --git a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh
index 87cda2684cb..4371484b866 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh
+++ b/hadoop-mapreduce-project/hadoop-yarn/bin/yarn-config.sh
@@ -39,6 +39,10 @@ this="$bin/$script"
 
 # the root of the Hadoop installation
 export YARN_HOME=`dirname "$this"`/..
 
+# Same glibc bug that was discovered in Hadoop.
+# Without this you can see very large vmem settings on containers.
+export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
+
 #check to see if the conf dir is given as an optional argument
 if [ $# -gt 1 ]
 then
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
index 99f145fbdc3..18bdce7286d 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/ApplicationConstants.java
@@ -140,6 +140,11 @@
      */
     HADOOP_HDFS_HOME("HADOOP_HDFS_HOME"),
 
+    /**
+     * $MALLOC_ARENA_MAX
+     */
+    MALLOC_ARENA_MAX("MALLOC_ARENA_MAX"),
+
     /**
      * $YARN_HOME
      */
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index aec4e194d2d..5d4c39a9d60 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -213,6 +213,14 @@ public class YarnConfiguration extends Configuration {
 
   /** Prefix for all node manager configs.*/
   public static final String NM_PREFIX = "yarn.nodemanager.";
+
+  /** Environment variables that will be sent to containers.*/
+  public static final String NM_ADMIN_USER_ENV = NM_PREFIX + "admin-env";
+  public static final String DEFAULT_NM_ADMIN_USER_ENV = "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX";
+
+  /** Environment variables that containers may override rather than use NodeManager's default.*/
+  public static final String NM_ENV_WHITELIST = NM_PREFIX + "env-whitelist";
+  public static final String DEFAULT_NM_ENV_WHITELIST = "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME";
 
   /** address of node manager IPC.*/
   public static final String NM_ADDRESS = NM_PREFIX + "address";
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
index b5fca9f00bf..944a13448eb 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/Apps.java
@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.util;
 
 import java.util.Iterator;
+import java.util.Map;
 
 import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
@@ -59,4 +60,55 @@ public class Apps {
   public static void throwParseException(String name, String s) {
     throw new YarnException(join("Error parsing ", name, ": ", s));
   }
+
+  public static void setEnvFromInputString(Map<String, String> env,
+      String envString) {
+    if (envString != null && envString.length() > 0) {
+      String childEnvs[] = envString.split(",");
+      for (String cEnv : childEnvs) {
+        String[] parts = cEnv.split("="); // split on '='
+        String value = env.get(parts[0]);
+
+        if (value != null) {
+          // Replace $env with the child's env constructed by NM's
+          // For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
+          value = parts[1].replace("$" + parts[0], value);
+        } else {
+          // example PATH=$PATH:/tmp
+          value = System.getenv(parts[0]);
+          if (value != null) {
+            // the env key is present in the tt's env
+            value = parts[1].replace("$" + parts[0], value);
+          } else {
+            // check for simple variable substitution
+            // for e.g. ROOT=$HOME
+            String envValue = System.getenv(parts[1].substring(1));
+            if (envValue != null) {
+              value = envValue;
+            } else {
+              // the env key is not present anywhere .. simply set it
+              // example X=$X:/tmp or X=/tmp
+              value = parts[1].replace("$" + parts[0], "");
+            }
+          }
+        }
+        addToEnvironment(env, parts[0], value);
+      }
+    }
+  }
+
+  private static final String SYSTEM_PATH_SEPARATOR =
+      System.getProperty("path.separator");
+
+  public static void addToEnvironment(
+      Map<String, String> environment,
+      String variable, String value) {
+    String val = environment.get(variable);
+    if (val == null) {
+      val = value;
+    } else {
+      val = val + SYSTEM_PATH_SEPARATOR + value;
+    }
+    environment.put(variable, val);
+  }
 }
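
For readers tracing the change, here is a minimal, self-contained sketch (not part of the patch) of how the two helpers that now live in org.apache.hadoop.yarn.util.Apps behave. The map contents and sample values are invented for illustration.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.yarn.util.Apps;

    public class AppsEnvSketch {
      public static void main(String[] args) {
        Map<String, String> env = new HashMap<String, String>();

        // addToEnvironment appends to an existing value using the JVM's
        // path.separator (":" on Linux), so repeated calls build up a list.
        Apps.addToEnvironment(env, "CLASSPATH", "job.jar");
        Apps.addToEnvironment(env, "CLASSPATH", "./*");
        // env.get("CLASSPATH") is now "job.jar:./*" on Linux.

        // setEnvFromInputString parses a comma-separated list of NAME=VALUE
        // pairs and expands "$NAME" first against the map, then against this
        // process's environment (System.getenv).
        Apps.setEnvFromInputString(env, "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX");
        // If this JVM was started with MALLOC_ARENA_MAX=4 (as yarn-config.sh
        // now arranges for YARN daemons), env ends up with MALLOC_ARENA_MAX=4;
        // if the variable is set nowhere, the reference is dropped and an
        // empty value is stored.
        System.out.println(env);
      }
    }
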
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml
index 806d31cbab6..19d0386b454 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/yarn-default.xml
@@ -194,6 +194,18 @@
     <value>0.0.0.0:45454</value>
   </property>
 
+  <property>
+    <description>Environment variables that should be forwarded from the NodeManager's environment to the container's.</description>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+  </property>
+
+  <property>
+    <description>Environment variables that containers may override rather than use NodeManager's default.</description>
+    <name>yarn.nodemanager.env-whitelist</name>
+    <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME</value>
+  </property>
+
   <property>
     <description>who will execute(launch) the containers.</description>
     <name>yarn.nodemanager.container-executor.class</name>
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index a4c9d625c40..217c1b02e5f 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -57,7 +57,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ResourceLocalizationService;
 import org.apache.hadoop.yarn.util.ConverterUtils;
-
+import org.apache.hadoop.yarn.util.Apps;
 
 public class ContainerLaunch implements Callable<Integer> {
 
   private static final Log LOG = LogFactory.getLog(ContainerLaunch.class);
@@ -309,7 +309,7 @@ public class ContainerLaunch implements Callable<Integer> {
     /**
      * Non-modifiable environment variables
      */
-    
+
     putEnvIfNotNull(environment, 
         Environment.USER.name(), container.getUser());
     
     putEnvIfNotNull(environment, 
@@ -343,11 +343,20 @@ public class ContainerLaunch implements Callable<Integer> {
      * Modifiable environment variables
      */
     
-    putEnvIfAbsent(environment, Environment.JAVA_HOME.name());
-    putEnvIfAbsent(environment, Environment.HADOOP_COMMON_HOME.name());
-    putEnvIfAbsent(environment, Environment.HADOOP_HDFS_HOME.name());
-    putEnvIfAbsent(environment, Environment.YARN_HOME.name());
+    // allow containers to override these variables
+    String[] whitelist = conf.get(YarnConfiguration.NM_ENV_WHITELIST, YarnConfiguration.DEFAULT_NM_ENV_WHITELIST).split(",");
+
+    for(String whitelistEnvVariable : whitelist) {
+      putEnvIfAbsent(environment, whitelistEnvVariable.trim());
+    }
+    // variables here will be forced in, even if the container has specified them.
+    Apps.setEnvFromInputString(
+        environment,
+        conf.get(
+            YarnConfiguration.NM_ADMIN_USER_ENV,
+            YarnConfiguration.DEFAULT_NM_ADMIN_USER_ENV)
+        );
   }
 
   static void writeLaunchEnv(OutputStream out,
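
As a usage note, sanitizeEnv in the patched ContainerLaunch reads two new knobs. The sketch below shows one way an operator or a test could set them programmatically; in a real cluster they would normally be placed in yarn-site.xml, and the values here (including the JAVA_LIBRARY_PATH entry and its path) are illustrative, not defaults introduced by the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class NodeManagerEnvConfigSketch {
      public static void main(String[] args) {
        Configuration conf = new YarnConfiguration();

        // Variables the NodeManager fills in only when the container has not
        // set them itself (the putEnvIfAbsent loop above).
        conf.set(YarnConfiguration.NM_ENV_WHITELIST,
            "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME");

        // Variables applied on top of whatever the container specified; the
        // patch comments call these "forced in". A hypothetical literal entry
        // such as JAVA_LIBRARY_PATH passes through unchanged.
        conf.set(YarnConfiguration.NM_ADMIN_USER_ENV,
            "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX,JAVA_LIBRARY_PATH=/opt/hadoop/native");

        System.out.println(conf.get(YarnConfiguration.NM_ENV_WHITELIST));
        System.out.println(conf.get(YarnConfiguration.NM_ADMIN_USER_ENV));
      }
    }
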
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
index 8fc35158bc7..372ec93d1d7 100644
--- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
+++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/TestContainerLaunch.java
@@ -20,21 +20,64 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
 
 import static org.junit.Assert.*;
 
+import java.io.BufferedReader;
 import java.io.File;
 import java.io.FileOutputStream;
+import java.io.FileReader;
 import java.io.IOException;
 import java.io.PrintWriter;
+import java.lang.reflect.Field;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.regex.Pattern;
 
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
+import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
+import org.apache.hadoop.yarn.api.records.ContainerState;
+import org.apache.hadoop.yarn.api.records.ContainerStatus;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.Resource;
+import org.apache.hadoop.yarn.api.records.URL;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
+import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
+import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
+import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
+import org.junit.Before;
 import org.junit.Test;
 
+import junit.framework.Assert;
 
-public class TestContainerLaunch {
+public class TestContainerLaunch extends BaseContainerManagerTest {
+
+  public TestContainerLaunch() throws UnsupportedFileSystemException {
+    super();
+  }
+
+  @Before
+  public void setup() throws IOException {
+    conf.setClass(
+        YarnConfiguration.NM_CONTAINER_MON_RESOURCE_CALCULATOR,
+        LinuxResourceCalculatorPlugin.class, ResourceCalculatorPlugin.class);
+    super.setup();
+  }
 
   @Test
   public void testSpecialCharSymlinks() throws IOException {
@@ -96,5 +139,144 @@ public class TestContainerLaunch {
       }
     }
   }
+
+  // this is a dirty hack - but should be ok for a unittest.
+  public static void setNewEnvironmentHack(Map<String, String> newenv) throws Exception {
+    Class[] classes = Collections.class.getDeclaredClasses();
+    Map<String, String> env = System.getenv();
+    for (Class cl : classes) {
+      if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
+        Field field = cl.getDeclaredField("m");
+        field.setAccessible(true);
+        Object obj = field.get(env);
+        Map<String, String> map = (Map<String, String>) obj;
+        map.clear();
+        map.putAll(newenv);
+      }
+    }
+  }
+
+  /**
+   * See if environment variable is forwarded using sanitizeEnv.
+   * @throws Exception
+   */
+  @Test
+  public void testContainerEnvVariables() throws Exception {
+    int exitCode = 0;
+    containerManager.start();
+
+    Map<String, String> envWithDummy = new HashMap<String, String>();
+    envWithDummy.putAll(System.getenv());
+    envWithDummy.put(Environment.MALLOC_ARENA_MAX.name(), "99");
+    setNewEnvironmentHack(envWithDummy);
+
+    String malloc = System.getenv(Environment.MALLOC_ARENA_MAX.name());
+    File scriptFile = new File(tmpDir, "scriptFile.sh");
+    PrintWriter fileWriter = new PrintWriter(scriptFile);
+    File processStartFile =
+        new File(tmpDir, "env_vars.txt").getAbsoluteFile();
+    fileWriter.write("\numask 0"); // So that start file is readable by the test
+    fileWriter.write("\necho $" + Environment.MALLOC_ARENA_MAX.name() + " > " + processStartFile);
+    fileWriter.write("\necho $$ >> " + processStartFile);
+    fileWriter.write("\nexec sleep 100");
+    fileWriter.close();
+
+    assert(malloc != null && !"".equals(malloc));
+
+    ContainerLaunchContext containerLaunchContext =
+        recordFactory.newRecordInstance(ContainerLaunchContext.class);
+
+    // ////// Construct the Container-id
+    ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+    appId.setClusterTimestamp(0);
+    appId.setId(0);
+    ApplicationAttemptId appAttemptId =
+        recordFactory.newRecordInstance(ApplicationAttemptId.class);
+    appAttemptId.setApplicationId(appId);
+    appAttemptId.setAttemptId(1);
+    ContainerId cId =
+        recordFactory.newRecordInstance(ContainerId.class);
+    cId.setApplicationAttemptId(appAttemptId);
+    containerLaunchContext.setContainerId(cId);
+
+    containerLaunchContext.setUser(user);
+
+    // upload the script file so that the container can run it
+    URL resource_alpha =
+        ConverterUtils.getYarnUrlFromPath(localFS
+            .makeQualified(new Path(scriptFile.getAbsolutePath())));
+    LocalResource rsrc_alpha =
+        recordFactory.newRecordInstance(LocalResource.class);
+    rsrc_alpha.setResource(resource_alpha);
+    rsrc_alpha.setSize(-1);
+    rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION);
+    rsrc_alpha.setType(LocalResourceType.FILE);
+    rsrc_alpha.setTimestamp(scriptFile.lastModified());
+    String destinationFile = "dest_file";
+    Map<String, LocalResource> localResources =
+        new HashMap<String, LocalResource>();
+    localResources.put(destinationFile, rsrc_alpha);
+    containerLaunchContext.setLocalResources(localResources);
+
+    // set up the rest of the container
+    containerLaunchContext.setUser(containerLaunchContext.getUser());
+    List<String> commands = new ArrayList<String>();
+    commands.add("/bin/bash");
+    commands.add(scriptFile.getAbsolutePath());
+    containerLaunchContext.setCommands(commands);
+    containerLaunchContext.setResource(recordFactory
+        .newRecordInstance(Resource.class));
+    containerLaunchContext.getResource().setMemory(100 * 1024 * 1024);
+    StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
+    startRequest.setContainerLaunchContext(containerLaunchContext);
+    containerManager.startContainer(startRequest);
+
+    int timeoutSecs = 0;
+    while (!processStartFile.exists() && timeoutSecs++ < 20) {
+      Thread.sleep(1000);
+      LOG.info("Waiting for process start-file to be created");
+    }
+    Assert.assertTrue("ProcessStartFile doesn't exist!",
+        processStartFile.exists());
+
+    // Now verify the contents of the file
+    BufferedReader reader =
+        new BufferedReader(new FileReader(processStartFile));
+    Assert.assertEquals(malloc, reader.readLine());
+    // Get the pid of the process
+    String pid = reader.readLine().trim();
+    // No more lines
+    Assert.assertEquals(null, reader.readLine());
+
+    // Now test the stop functionality.
+
+    // Assert that the process is alive
+    Assert.assertTrue("Process is not alive!",
+        exec.signalContainer(user,
+            pid, Signal.NULL));
+    // Once more
+    Assert.assertTrue("Process is not alive!",
+        exec.signalContainer(user,
+            pid, Signal.NULL));
+
+    StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class);
+    stopRequest.setContainerId(cId);
+    containerManager.stopContainer(stopRequest);
+
+    BaseContainerManagerTest.waitForContainerState(containerManager, cId,
+        ContainerState.COMPLETE);
+
+    GetContainerStatusRequest gcsRequest =
+        recordFactory.newRecordInstance(GetContainerStatusRequest.class);
+    gcsRequest.setContainerId(cId);
+    ContainerStatus containerStatus =
+        containerManager.getContainerStatus(gcsRequest).getStatus();
+    Assert.assertEquals(ExitCode.KILLED.getExitCode(),
+        containerStatus.getExitStatus());
+
+    // Assert that the process is not alive anymore
+    Assert.assertFalse("Process is still alive!",
+        exec.signalContainer(user,
+            pid, Signal.NULL));
+  }
 }
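
Finally, a rough illustration of the ordering the patch establishes and the test above verifies. This is not NodeManager code (putEnvIfAbsent and sanitizeEnv are internal helpers), so the whitelist step is approximated with plain map operations: whitelisted variables are copied from the daemon's environment only when the container did not set them, and the admin env is applied afterwards.

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.yarn.util.Apps;

    public class ContainerEnvOrderSketch {
      public static void main(String[] args) {
        // Environment the container asked for in its launch context.
        Map<String, String> environment = new HashMap<String, String>();
        environment.put("JAVA_HOME", "/opt/custom-jdk");

        // 1. Whitelisted variables are filled in only when absent, so the
        //    container's own JAVA_HOME survives; an unset variable such as
        //    HADOOP_CONF_DIR is copied from this process's environment.
        String whitelist =
            "JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,YARN_HOME";
        for (String var : whitelist.split(",")) {
          String key = var.trim();
          if (!environment.containsKey(key) && System.getenv(key) != null) {
            environment.put(key, System.getenv(key));
          }
        }

        // 2. The admin env (default MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX) is
        //    applied afterwards, regardless of what the container set.
        Apps.setEnvFromInputString(environment, "MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX");

        System.out.println(environment);
      }
    }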