From 2abab1d7c53e64c160384fd5a3ac4cd8ffa57af4 Mon Sep 17 00:00:00 2001
From: Jason Lowe
Date: Tue, 12 Dec 2017 16:04:15 -0600
Subject: [PATCH] YARN-7595. Container launching code suppresses close exceptions after writes. Contributed by Jim Brennan

---
 .../nodemanager/DefaultContainerExecutor.java | 25 +++------
 .../launcher/ContainerLaunch.java             | 52 +++++++++----------
 .../JavaSandboxLinuxContainerRuntime.java     | 17 +++---
 3 files changed, 40 insertions(+), 54 deletions(-)

diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
index 5d78f9d4c43..5772403567e 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java
@@ -42,12 +42,10 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.Shell;
 import org.apache.hadoop.util.Shell.CommandExecutor;
 import org.apache.hadoop.util.Shell.ExitCodeException;
 import org.apache.hadoop.util.Shell.ShellCommandExecutor;
-import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -414,15 +412,11 @@ public class DefaultContainerExecutor extends ContainerExecutor {
    */
   public void writeLocalWrapperScript(Path launchDst, Path pidFile)
       throws IOException {
-    DataOutputStream out = null;
-    PrintStream pout = null;
-
-    try {
-      out = lfs.create(wrapperScriptPath, EnumSet.of(CREATE, OVERWRITE));
-      pout = new PrintStream(out, false, "UTF-8");
+    try (DataOutputStream out =
+        lfs.create(wrapperScriptPath, EnumSet.of(CREATE, OVERWRITE));
+        PrintStream pout =
+            new PrintStream(out, false, "UTF-8")) {
       writeLocalWrapperScript(launchDst, pidFile, pout);
-    } finally {
-      IOUtils.cleanupWithLogger(LOG, pout, out);
     }
   }
 
@@ -489,11 +483,10 @@ public class DefaultContainerExecutor extends ContainerExecutor {
   private void writeSessionScript(Path launchDst, Path pidFile)
       throws IOException {
-    DataOutputStream out = null;
-    PrintStream pout = null;
-    try {
-      out = lfs.create(sessionScriptPath, EnumSet.of(CREATE, OVERWRITE));
-      pout = new PrintStream(out, false, "UTF-8");
+    try (DataOutputStream out =
+        lfs.create(sessionScriptPath, EnumSet.of(CREATE, OVERWRITE));
+        PrintStream pout =
+            new PrintStream(out, false, "UTF-8")) {
       // We need to do a move as writing to a file is not atomic
       // Process reading a file being written to may get garbled data
       // hence write pid to tmp file first followed by a mv
@@ -503,8 +496,6 @@ public class DefaultContainerExecutor extends ContainerExecutor {
       pout.println("/bin/mv -f " + pidFile.toString() + ".tmp " + pidFile);
       String exec = Shell.isSetsidAvailable? "exec setsid" : "exec";
       pout.printf("%s /bin/bash \"%s\"", exec, launchDst.toUri().getPath());
-    } finally {
-      IOUtils.cleanupWithLogger(LOG, pout, out);
     }
     lfs.setPermission(sessionScriptPath,
         ContainerExecutor.TASK_LAUNCH_SCRIPT_PERMISSION);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
index f1c826e00f6..db90215b131 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java
@@ -220,15 +220,13 @@ public class ContainerLaunch implements Callable<Integer> {
           containerIdStr));
       Path nmPrivateClasspathJarDir = dirsHandler.getLocalPathForWrite(
           getContainerPrivateDir(appIdStr, containerIdStr));
-      DataOutputStream containerScriptOutStream = null;
-      DataOutputStream tokensOutStream = null;
 
       // Select the working directory for the container
       Path containerWorkDir = deriveContainerWorkDir();
       recordContainerWorkDir(containerID, containerWorkDir.toString());
 
       String pidFileSubpath = getPidFileSubpath(appIdStr, containerIdStr);
-      // pid file should be in nm private dir so that it is not 
+      // pid file should be in nm private dir so that it is not
       // accessible by users
       pidFilePath = dirsHandler.getLocalPathForWrite(pidFileSubpath);
       List<String> localDirs = dirsHandler.getLocalDirs();
@@ -243,24 +241,24 @@ public class ContainerLaunch implements Callable<Integer> {
         throw new IOException("Most of the disks failed. "
             + dirsHandler.getDisksHealthReport(false));
       }
-      try {
-        // /////////// Write out the container-script in the nmPrivate space.
-        List<Path> appDirs = new ArrayList<Path>(localDirs.size());
-        for (String localDir : localDirs) {
-          Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
-          Path userdir = new Path(usersdir, user);
-          Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
-          appDirs.add(new Path(appsdir, appIdStr));
-        }
-        containerScriptOutStream =
-          lfs.create(nmPrivateContainerScriptPath,
-              EnumSet.of(CREATE, OVERWRITE));
+      List<Path> appDirs = new ArrayList<Path>(localDirs.size());
+      for (String localDir : localDirs) {
+        Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
+        Path userdir = new Path(usersdir, user);
+        Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
+        appDirs.add(new Path(appsdir, appIdStr));
+      }
 
-        // Set the token location too.
-        environment.put(
-            ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME,
-            new Path(containerWorkDir,
-                FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
+      // Set the token location too.
+      environment.put(
+          ApplicationConstants.CONTAINER_TOKEN_FILE_ENV_NAME,
+          new Path(containerWorkDir,
+              FINAL_CONTAINER_TOKENS_FILE).toUri().getPath());
+
+      // /////////// Write out the container-script in the nmPrivate space.
+      try (DataOutputStream containerScriptOutStream =
+          lfs.create(nmPrivateContainerScriptPath,
+              EnumSet.of(CREATE, OVERWRITE))) {
         // Sanitize the container's environment
         sanitizeEnv(environment, containerWorkDir, appDirs, userLocalDirs,
             containerLogDirs, localResources, nmPrivateClasspathJarDir);
@@ -271,18 +269,16 @@ public class ContainerLaunch implements Callable<Integer> {
         exec.writeLaunchEnv(containerScriptOutStream, environment,
             localResources, launchContext.getCommands(),
             new Path(containerLogDirs.get(0)), user);
-        // /////////// End of writing out container-script
+      }
+      // /////////// End of writing out container-script
 
-        // /////////// Write out the container-tokens in the nmPrivate space.
-        tokensOutStream =
-            lfs.create(nmPrivateTokensPath, EnumSet.of(CREATE, OVERWRITE));
+      // /////////// Write out the container-tokens in the nmPrivate space.
+      try (DataOutputStream tokensOutStream =
+          lfs.create(nmPrivateTokensPath, EnumSet.of(CREATE, OVERWRITE))) {
         Credentials creds = container.getCredentials();
         creds.writeTokenStorageToStream(tokensOutStream);
-        // /////////// End of writing out container-tokens
-      } finally {
-        IOUtils.cleanupWithLogger(LOG, containerScriptOutStream,
-            tokensOutStream);
       }
+      // /////////// End of writing out container-tokens
 
       ret = launchContainer(new ContainerStartContext.Builder()
         .setContainer(container)
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
index 245b38faaf5..1ab1fc58110 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/linux/runtime/JavaSandboxLinuxContainerRuntime.java
@@ -231,7 +231,6 @@ public class JavaSandboxLinuxContainerRuntime
       throw new ContainerExecutionException("hadoop.tmp.dir not set!");
     }
 
-    OutputStream policyOutputStream = null;
     try {
       String containerID = ctx.getExecutionAttribute(CONTAINER_ID_STR);
       initializePolicyDir();
@@ -242,19 +241,19 @@ public class JavaSandboxLinuxContainerRuntime
           Paths.get(policyFileDir.toString(),
           containerID + "-" + NMContainerPolicyUtils.POLICY_FILE), POLICY_ATTR);
-      policyOutputStream = Files.newOutputStream(policyFilePath);
-      containerPolicies.put(containerID, policyFilePath);
+      try(OutputStream policyOutputStream =
+          Files.newOutputStream(policyFilePath)) {
 
-      NMContainerPolicyUtils.generatePolicyFile(policyOutputStream,
-          localDirs, groupPolicyFiles, resources, configuration);
-      NMContainerPolicyUtils.appendSecurityFlags(
-          commands, env, policyFilePath, sandboxMode);
+        containerPolicies.put(containerID, policyFilePath);
+        NMContainerPolicyUtils.generatePolicyFile(policyOutputStream,
+            localDirs, groupPolicyFiles, resources, configuration);
+        NMContainerPolicyUtils.appendSecurityFlags(
+            commands, env, policyFilePath, sandboxMode);
+      }
     } catch (IOException e) {
       throw new ContainerExecutionException(e);
-    } finally {
-      IOUtils.cleanupWithLogger(LOG, policyOutputStream);
     }
   }
 }
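Note on the change (not part of the patch): the old finally + IOUtils.cleanupWithLogger(...) pattern logs and swallows any IOException thrown by close(), so a launch script, token file, or policy file could be silently truncated (for example when the final flush hits a full disk) while container launch proceeded as if the write had succeeded. try-with-resources closes the streams in reverse order and lets a close() failure propagate (or attaches it as a suppressed exception if the body also threw), so the launch fails fast instead. Below is a minimal, self-contained sketch of the difference; the class and method names are illustrative only and do not appear in the patch.

import java.io.ByteArrayOutputStream;
import java.io.FilterOutputStream;
import java.io.IOException;
import java.io.OutputStream;

public class CloseExceptionDemo {

  // A stream whose close() fails, e.g. a buffered stream whose final flush
  // hits a full disk.
  static class FailingClose extends FilterOutputStream {
    FailingClose(OutputStream out) {
      super(out);
    }
    @Override
    public void close() throws IOException {
      throw new IOException("close failed");
    }
  }

  // Old pattern: close() runs in a finally block via a cleanup helper that
  // only logs the failure, so the caller believes the file was written.
  static void cleanupInFinally() throws IOException {
    OutputStream out = null;
    try {
      out = new FailingClose(new ByteArrayOutputStream());
      out.write(42);
    } finally {
      try {
        if (out != null) {
          out.close();
        }
      } catch (IOException e) {
        // roughly what IOUtils.cleanupWithLogger(LOG, out) does: log and swallow
        System.err.println("ignored: " + e);
      }
    }
  }

  // New pattern: try-with-resources rethrows the close() failure (or records
  // it as a suppressed exception if the body already threw).
  static void tryWithResources() throws IOException {
    try (OutputStream out = new FailingClose(new ByteArrayOutputStream())) {
      out.write(42);
    }
  }

  public static void main(String[] args) throws IOException {
    cleanupInFinally();   // returns normally despite the failed close
    tryWithResources();   // throws IOException: close failed
  }
}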