Merge r1470760 through r1471228 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-2802@1471229 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 2013-04-24 02:21:58 +00:00
commit 5f1e3b561a
56 changed files with 999 additions and 418 deletions

View File

@ -326,6 +326,10 @@ Trunk (Unreleased)
HDFS-4646. createNNProxyWithClientProtocol ignores configured timeout
value (Jagane Sundar via cos)
HDFS-4732. Fix TestDFSUpgradeFromImage which fails on Windows due to
failure to unpack old image tarball that contains hard links.
(Chris Nauroth via szetszwo)
BREAKDOWN OF HADOOP-8562 SUBTASKS AND RELATED JIRAS
HDFS-4145. Merge hdfs cmd line scripts from branch-1-win. (David Lao,
@ -360,6 +364,9 @@ Trunk (Unreleased)
HDFS-4674. TestBPOfferService fails on Windows due to failure parsing
datanode data directory as URI. (Chris Nauroth via suresh)
HDFS-4725. Fix HDFS file handle leaks in FSEditLog, NameNode,
OfflineEditsBinaryLoader and some tests. (Chris Nauroth via szetszwo)
BREAKDOWN OF HDFS-347 SUBTASKS AND RELATED JIRAS
HDFS-4353. Encapsulate connections to peers in Peer and PeerServer classes.
@ -573,6 +580,8 @@ Release 2.0.5-beta - UNRELEASED
HDFS-4695. TestEditLog leaks open file handles between tests.
(Ivan Mitic via suresh)
HDFS-4737. JVM path embedded in fuse binaries. (Sean Mackrory via atm)
Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -322,21 +322,23 @@ public class FSEditLog implements LogsPurgeable {
LOG.debug("Closing log when already closed");
return;
}
if (state == State.IN_SEGMENT) {
assert editLogStream != null;
waitForSyncToFinish();
endCurrentLogSegment(true);
}
if (journalSet != null && !journalSet.isEmpty()) {
try {
journalSet.close();
} catch (IOException ioe) {
LOG.warn("Error closing journalSet", ioe);
}
}
state = State.CLOSED;
try {
if (state == State.IN_SEGMENT) {
assert editLogStream != null;
waitForSyncToFinish();
endCurrentLogSegment(true);
}
} finally {
if (journalSet != null && !journalSet.isEmpty()) {
try {
journalSet.close();
} catch (IOException ioe) {
LOG.warn("Error closing journalSet", ioe);
}
}
state = State.CLOSED;
}
}
@ -588,6 +590,7 @@ public class FSEditLog implements LogsPurgeable {
"due to " + e.getMessage() + ". " +
"Unsynced transactions: " + (txid - synctxid);
LOG.fatal(msg, new Exception());
IOUtils.cleanup(LOG, journalSet);
terminate(1, msg);
}
} finally {
@ -611,6 +614,7 @@ public class FSEditLog implements LogsPurgeable {
"Could not sync enough journals to persistent storage. "
+ "Unsynced transactions: " + (txid - synctxid);
LOG.fatal(msg, new Exception());
IOUtils.cleanup(LOG, journalSet);
terminate(1, msg);
}
}

View File

@ -651,13 +651,14 @@ public class NameNode {
}
} catch (ServiceFailedException e) {
LOG.warn("Encountered exception while exiting state ", e);
}
stopCommonServices();
if (metrics != null) {
metrics.shutdown();
}
if (namesystem != null) {
namesystem.shutdown();
} finally {
stopCommonServices();
if (metrics != null) {
metrics.shutdown();
}
if (namesystem != null) {
namesystem.shutdown();
}
}
}

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.hdfs.tools.offlineEditsViewer.OfflineEditsViewer;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp;
import org.apache.hadoop.hdfs.server.namenode.EditLogInputStream;
import org.apache.hadoop.io.IOUtils;
/**
* OfflineEditsBinaryLoader loads edits from a binary edits file
@ -59,43 +60,49 @@ class OfflineEditsBinaryLoader implements OfflineEditsLoader {
*/
@Override
public void loadEdits() throws IOException {
visitor.start(inputStream.getVersion());
while (true) {
try {
FSEditLogOp op = inputStream.readOp();
if (op == null)
break;
if (fixTxIds) {
if (nextTxId <= 0) {
nextTxId = op.getTransactionId();
try {
visitor.start(inputStream.getVersion());
while (true) {
try {
FSEditLogOp op = inputStream.readOp();
if (op == null)
break;
if (fixTxIds) {
if (nextTxId <= 0) {
nextTxId = 1;
nextTxId = op.getTransactionId();
if (nextTxId <= 0) {
nextTxId = 1;
}
}
op.setTransactionId(nextTxId);
nextTxId++;
}
op.setTransactionId(nextTxId);
nextTxId++;
visitor.visitOp(op);
} catch (IOException e) {
if (!recoveryMode) {
// Tell the visitor to clean up, then re-throw the exception
LOG.error("Got IOException at position " +
inputStream.getPosition());
visitor.close(e);
throw e;
}
LOG.error("Got IOException while reading stream! Resyncing.", e);
inputStream.resync();
} catch (RuntimeException e) {
if (!recoveryMode) {
// Tell the visitor to clean up, then re-throw the exception
LOG.error("Got RuntimeException at position " +
inputStream.getPosition());
visitor.close(e);
throw e;
}
LOG.error("Got RuntimeException while reading stream! Resyncing.", e);
inputStream.resync();
}
visitor.visitOp(op);
} catch (IOException e) {
if (!recoveryMode) {
// Tell the visitor to clean up, then re-throw the exception
LOG.error("Got IOException at position " + inputStream.getPosition());
visitor.close(e);
throw e;
}
LOG.error("Got IOException while reading stream! Resyncing.", e);
inputStream.resync();
} catch (RuntimeException e) {
if (!recoveryMode) {
// Tell the visitor to clean up, then re-throw the exception
LOG.error("Got RuntimeException at position " + inputStream.getPosition());
visitor.close(e);
throw e;
}
LOG.error("Got RuntimeException while reading stream! Resyncing.", e);
inputStream.resync();
}
visitor.close(null);
} finally {
IOUtils.cleanup(LOG, inputStream);
}
visitor.close(null);
}
}
}
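
The same handle-cleanup idiom recurs throughout this commit: do the read inside try/finally and let IOUtils.cleanup close the stream, logging (rather than throwing) any secondary failure so it cannot mask the primary exception. A minimal sketch of the idiom, mirroring the DFSTestUtil change later in this commit (the Log parameter is an assumption for self-containment):

    static byte[] readFileFully(File file, Log LOG) throws IOException {
      DataInputStream in = new DataInputStream(new FileInputStream(file));
      byte[] content = new byte[(int) file.length()];
      try {
        in.readFully(content);
      } finally {
        // cleanup logs and swallows close() errors instead of throwing them
        IOUtils.cleanup(LOG, in);
      }
      return content;
    }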

View File

@ -16,6 +16,8 @@
# limitations under the License.
#
set(CMAKE_SKIP_RPATH TRUE)
# Find Linux FUSE
IF (${CMAKE_SYSTEM_NAME} MATCHES "Linux")
find_package(PkgConfig REQUIRED)

View File

@ -48,6 +48,8 @@ import java.util.Random;
import java.util.Set;
import java.util.concurrent.TimeoutException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.CommonConfigurationKeys;
@ -91,6 +93,8 @@ import com.google.common.base.Joiner;
/** Utilities for HDFS tests */
public class DFSTestUtil {
private static final Log LOG = LogFactory.getLog(DFSTestUtil.class);
private static Random gen = new Random();
private static String[] dirNames = {
@ -742,7 +746,11 @@ public class DFSTestUtil {
File file = new File(filename);
DataInputStream in = new DataInputStream(new FileInputStream(file));
byte[] content = new byte[(int)file.length()];
in.readFully(content);
try {
in.readFully(content);
} finally {
IOUtils.cleanup(LOG, in);
}
return content;
}

View File

@ -252,8 +252,9 @@ public class TestDFSUpgradeFromImage {
// Now try to start an NN from it
MiniDFSCluster cluster = null;
try {
new MiniDFSCluster.Builder(conf).numDataNodes(0)
cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
.format(false)
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
@ -264,6 +265,11 @@ public class TestDFSUpgradeFromImage {
if (!ioe.toString().contains("Old layout version is 'too old'")) {
throw ioe;
}
} finally {
// We expect startup to fail, but just in case it didn't, shutdown now.
if (cluster != null) {
cluster.shutdown();
}
}
}

View File

@ -631,44 +631,48 @@ public class TestDistributedFileSystem {
true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(2).build();
DistributedFileSystem fs = cluster.getFileSystem();
// Create two files
Path tmpFile1 = new Path("/tmpfile1.dat");
Path tmpFile2 = new Path("/tmpfile2.dat");
DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADl);
DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADl);
// Get locations of blocks of both files and concat together
BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
BlockLocation[] blockLocs = (BlockLocation[]) ArrayUtils.addAll(blockLocs1,
blockLocs2);
// Fetch VolumeBlockLocations in batch
BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
.asList(blockLocs));
int counter = 0;
// Print out the list of ids received for each block
for (BlockStorageLocation l : locs) {
for (int i = 0; i < l.getVolumeIds().length; i++) {
VolumeId id = l.getVolumeIds()[i];
String name = l.getNames()[i];
if (id != null) {
System.out.println("Datanode " + name + " has block " + counter
+ " on volume id " + id.toString());
try {
DistributedFileSystem fs = cluster.getFileSystem();
// Create two files
Path tmpFile1 = new Path("/tmpfile1.dat");
Path tmpFile2 = new Path("/tmpfile2.dat");
DFSTestUtil.createFile(fs, tmpFile1, 1024, (short) 2, 0xDEADDEADl);
DFSTestUtil.createFile(fs, tmpFile2, 1024, (short) 2, 0xDEADDEADl);
// Get locations of blocks of both files and concat together
BlockLocation[] blockLocs1 = fs.getFileBlockLocations(tmpFile1, 0, 1024);
BlockLocation[] blockLocs2 = fs.getFileBlockLocations(tmpFile2, 0, 1024);
BlockLocation[] blockLocs = (BlockLocation[]) ArrayUtils.addAll(blockLocs1,
blockLocs2);
// Fetch VolumeBlockLocations in batch
BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
.asList(blockLocs));
int counter = 0;
// Print out the list of ids received for each block
for (BlockStorageLocation l : locs) {
for (int i = 0; i < l.getVolumeIds().length; i++) {
VolumeId id = l.getVolumeIds()[i];
String name = l.getNames()[i];
if (id != null) {
System.out.println("Datanode " + name + " has block " + counter
+ " on volume id " + id.toString());
}
}
counter++;
}
assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2,
locs.length);
for (BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block", 2,
l.getVolumeIds().length);
for (int i = 0; i < l.getVolumeIds().length; i++) {
VolumeId id = l.getVolumeIds()[i];
String name = l.getNames()[i];
assertTrue("Expected block to be valid on datanode " + name,
id.isValid());
}
}
counter++;
}
assertEquals("Expected two HdfsBlockLocations for two 1-block files", 2,
locs.length);
for (BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block", 2,
l.getVolumeIds().length);
for (int i = 0; i < l.getVolumeIds().length; i++) {
VolumeId id = l.getVolumeIds()[i];
String name = l.getNames()[i];
assertTrue("Expected block to be valid on datanode " + name,
id.isValid());
}
} finally {
cluster.shutdown();
}
}
@ -683,27 +687,31 @@ public class TestDistributedFileSystem {
true);
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
.numDataNodes(2).build();
cluster.getDataNodes();
DistributedFileSystem fs = cluster.getFileSystem();
// Create a file
Path tmpFile = new Path("/tmpfile1.dat");
DFSTestUtil.createFile(fs, tmpFile, 1024, (short) 2, 0xDEADDEADl);
// Get locations of blocks of the file
BlockLocation[] blockLocs = fs.getFileBlockLocations(tmpFile, 0, 1024);
// Stop a datanode to simulate a failure
cluster.stopDataNode(0);
// Fetch VolumeBlockLocations
BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
.asList(blockLocs));
try {
cluster.getDataNodes();
DistributedFileSystem fs = cluster.getFileSystem();
// Create a file
Path tmpFile = new Path("/tmpfile1.dat");
DFSTestUtil.createFile(fs, tmpFile, 1024, (short) 2, 0xDEADDEADl);
// Get locations of blocks of the file
BlockLocation[] blockLocs = fs.getFileBlockLocations(tmpFile, 0, 1024);
// Stop a datanode to simulate a failure
cluster.stopDataNode(0);
// Fetch VolumeBlockLocations
BlockStorageLocation[] locs = fs.getFileBlockStorageLocations(Arrays
.asList(blockLocs));
assertEquals("Expected one HdfsBlockLocation for one 1-block file", 1,
locs.length);
assertEquals("Expected one HdfsBlockLocation for one 1-block file", 1,
locs.length);
for (BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block", 2,
l.getVolumeIds().length);
assertTrue("Expected one valid and one invalid replica",
(l.getVolumeIds()[0].isValid()) ^ (l.getVolumeIds()[1].isValid()));
for (BlockStorageLocation l : locs) {
assertEquals("Expected two replicas for each block", 2,
l.getVolumeIds().length);
assertTrue("Expected one valid and one invalid replica",
(l.getVolumeIds()[0].isValid()) ^ (l.getVolumeIds()[1].isValid()));
}
} finally {
cluster.shutdown();
}
}

View File

@ -781,49 +781,53 @@ public class TestQuota {
public void testMaxSpaceQuotas() throws Exception {
final Configuration conf = new HdfsConfiguration();
final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
final FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = (DistributedFileSystem)fs;
// create test directory
final Path testFolder = new Path("/testFolder");
assertTrue(dfs.mkdirs(testFolder));
// setting namespace quota to Long.MAX_VALUE - 1 should work
dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
ContentSummary c = dfs.getContentSummary(testFolder);
assertTrue("Quota not set properly", c.getQuota() == Long.MAX_VALUE - 1);
// setting diskspace quota to Long.MAX_VALUE - 1 should work
dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
c = dfs.getContentSummary(testFolder);
assertTrue("Quota not set properly", c.getSpaceQuota() == Long.MAX_VALUE - 1);
// setting namespace quota to Long.MAX_VALUE should not work + no error
dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
c = dfs.getContentSummary(testFolder);
assertTrue("Quota should not have changed", c.getQuota() == 10);
// setting diskspace quota to Long.MAX_VALUE should not work + no error
dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
c = dfs.getContentSummary(testFolder);
assertTrue("Quota should not have changed", c.getSpaceQuota() == 10);
// setting namespace quota to Long.MAX_VALUE + 1 should not work + error
try {
dfs.setQuota(testFolder, Long.MAX_VALUE + 1, 10);
fail("Exception not thrown");
} catch (IllegalArgumentException e) {
// Expected
}
final FileSystem fs = cluster.getFileSystem();
assertTrue("Not a HDFS: "+fs.getUri(),
fs instanceof DistributedFileSystem);
final DistributedFileSystem dfs = (DistributedFileSystem)fs;
// setting diskspace quota to Long.MAX_VALUE + 1 should not work + error
try {
dfs.setQuota(testFolder, 10, Long.MAX_VALUE + 1);
fail("Exception not thrown");
} catch (IllegalArgumentException e) {
// Expected
// create test directory
final Path testFolder = new Path("/testFolder");
assertTrue(dfs.mkdirs(testFolder));
// setting namespace quota to Long.MAX_VALUE - 1 should work
dfs.setQuota(testFolder, Long.MAX_VALUE - 1, 10);
ContentSummary c = dfs.getContentSummary(testFolder);
assertTrue("Quota not set properly", c.getQuota() == Long.MAX_VALUE - 1);
// setting diskspace quota to Long.MAX_VALUE - 1 should work
dfs.setQuota(testFolder, 10, Long.MAX_VALUE - 1);
c = dfs.getContentSummary(testFolder);
assertTrue("Quota not set properly", c.getSpaceQuota() == Long.MAX_VALUE - 1);
// setting namespace quota to Long.MAX_VALUE should not work + no error
dfs.setQuota(testFolder, Long.MAX_VALUE, 10);
c = dfs.getContentSummary(testFolder);
assertTrue("Quota should not have changed", c.getQuota() == 10);
// setting diskspace quota to Long.MAX_VALUE should not work + no error
dfs.setQuota(testFolder, 10, Long.MAX_VALUE);
c = dfs.getContentSummary(testFolder);
assertTrue("Quota should not have changed", c.getSpaceQuota() == 10);
// setting namespace quota to Long.MAX_VALUE + 1 should not work + error
try {
dfs.setQuota(testFolder, Long.MAX_VALUE + 1, 10);
fail("Exception not thrown");
} catch (IllegalArgumentException e) {
// Expected
}
// setting diskspace quota to Long.MAX_VALUE + 1 should not work + error
try {
dfs.setQuota(testFolder, 10, Long.MAX_VALUE + 1);
fail("Exception not thrown");
} catch (IllegalArgumentException e) {
// Expected
}
} finally {
cluster.shutdown();
}
}

View File

@ -255,7 +255,6 @@ public class TestEditLogJournalFailures {
doThrow(new IOException("fail on setReadyToFlush()")).when(spyElos)
.setReadyToFlush();
}
doNothing().when(spyElos).abort();
}
private EditLogFileOutputStream spyOnStream(JournalAndStream jas) {

View File

@ -530,7 +530,11 @@ public class TestStartup {
.manageDataDfsDirs(false)
.manageNameDfsDirs(false)
.build();
cluster.waitActive();
try {
cluster.waitActive();
} finally {
cluster.shutdown();
}
}
/**

View File

@ -206,6 +206,9 @@ Release 2.0.5-beta - UNRELEASED
instead of extracting and populating information itself to start any
container. (vinodkv)
MAPREDUCE-5175. Updated MR App to not set envs that will be set by NMs
anyways after YARN-561. (Xuan Gong via vinodkv)
OPTIMIZATIONS
MAPREDUCE-4974. Optimising the LineRecordReader initialize() method

View File

@ -52,6 +52,7 @@ import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.service.AbstractService;
/**
@ -280,7 +281,7 @@ public class LocalContainerLauncher extends AbstractService implements
// Use the AM's local dir env to generate the intermediate step
// output files
String[] localSysDirs = StringUtils.getTrimmedStrings(
System.getenv(ApplicationConstants.LOCAL_DIR_ENV));
System.getenv(Environment.LOCAL_DIRS.name()));
conf.setStrings(MRConfig.LOCAL_DIR, localSysDirs);
LOG.info(MRConfig.LOCAL_DIR + " for uber task: "
+ conf.get(MRConfig.LOCAL_DIR));

View File

@ -111,8 +111,6 @@ public class MapReduceChildJVM {
MRJobConfig.STDERR_LOGFILE_ENV,
getTaskLogFile(TaskLog.LogName.STDERR)
);
environment.put(MRJobConfig.APPLICATION_ATTEMPT_ID_ENV,
conf.get(MRJobConfig.APPLICATION_ATTEMPT_ID).toString());
}
private static String getChildJavaOpts(JobConf jobConf, boolean isMapTask) {

View File

@ -57,6 +57,9 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.log4j.LogManager;
/**
@ -216,7 +219,7 @@ class YarnChild {
*/
private static void configureLocalDirs(Task task, JobConf job) throws IOException {
String[] localSysDirs = StringUtils.getTrimmedStrings(
System.getenv(ApplicationConstants.LOCAL_DIR_ENV));
System.getenv(Environment.LOCAL_DIRS.name()));
job.setStrings(MRConfig.LOCAL_DIR, localSysDirs);
LOG.info(MRConfig.LOCAL_DIR + " for child: " + job.get(MRConfig.LOCAL_DIR));
LocalDirAllocator lDirAlloc = new LocalDirAllocator(MRConfig.LOCAL_DIR);
@ -256,12 +259,14 @@ class YarnChild {
final JobConf job = new JobConf(MRJobConfig.JOB_CONF_FILE);
job.setCredentials(credentials);
String appAttemptIdEnv = System
.getenv(MRJobConfig.APPLICATION_ATTEMPT_ID_ENV);
LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptIdEnv);
ApplicationAttemptId appAttemptId =
ConverterUtils.toContainerId(
System.getenv(Environment.CONTAINER_ID.name()))
.getApplicationAttemptId();
LOG.debug("APPLICATION_ATTEMPT_ID: " + appAttemptId);
// Set it in conf, so that it can be used by the OutputCommitter.
job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, Integer
.parseInt(appAttemptIdEnv));
job.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID,
appAttemptId.getAttemptId());
// set tcp nodelay
job.setBoolean("ipc.client.tcpnodelay", true);

View File

@ -116,6 +116,7 @@ import org.apache.hadoop.yarn.SystemClock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.YarnUncaughtExceptionHandler;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -1270,22 +1271,22 @@ public class MRAppMaster extends CompositeService {
try {
Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
String containerIdStr =
System.getenv(ApplicationConstants.AM_CONTAINER_ID_ENV);
String nodeHostString = System.getenv(ApplicationConstants.NM_HOST_ENV);
String nodePortString = System.getenv(ApplicationConstants.NM_PORT_ENV);
System.getenv(Environment.CONTAINER_ID.name());
String nodeHostString = System.getenv(Environment.NM_HOST.name());
String nodePortString = System.getenv(Environment.NM_PORT.name());
String nodeHttpPortString =
System.getenv(ApplicationConstants.NM_HTTP_PORT_ENV);
System.getenv(Environment.NM_HTTP_PORT.name());
String appSubmitTimeStr =
System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);
String maxAppAttempts =
System.getenv(ApplicationConstants.MAX_APP_ATTEMPTS_ENV);
validateInputParam(containerIdStr,
ApplicationConstants.AM_CONTAINER_ID_ENV);
validateInputParam(nodeHostString, ApplicationConstants.NM_HOST_ENV);
validateInputParam(nodePortString, ApplicationConstants.NM_PORT_ENV);
Environment.CONTAINER_ID.name());
validateInputParam(nodeHostString, Environment.NM_HOST.name());
validateInputParam(nodePortString, Environment.NM_PORT.name());
validateInputParam(nodeHttpPortString,
ApplicationConstants.NM_HTTP_PORT_ENV);
Environment.NM_HTTP_PORT.name());
validateInputParam(appSubmitTimeStr,
ApplicationConstants.APP_SUBMIT_TIME_ENV);
validateInputParam(maxAppAttempts,

View File

@ -569,8 +569,6 @@ public interface MRJobConfig {
public static final String STDOUT_LOGFILE_ENV = "STDOUT_LOGFILE_ENV";
public static final String STDERR_LOGFILE_ENV = "STDERR_LOGFILE_ENV";
public static final String APPLICATION_ATTEMPT_ID_ENV = "APPLICATION_ATTEMPT_ID_ENV";
// This should be the directory where splits file gets localized on the node
// running ApplicationMaster.
public static final String JOB_SUBMIT_DIR = "jobSubmitDir";

View File

@ -95,6 +95,9 @@ Release 2.0.5-beta - UNRELEASED
YARN-441. Removed unused utility methods for collections from two API
records. (Xuan Gong via vinodkv)
YARN-561. Modified NodeManager to set key information into the environment
of every container that it launches. (Xuan Gong via vinodkv)
NEW FEATURES
YARN-482. FS: Extend SchedulingMode to intermediate queues.
@ -167,6 +170,13 @@ Release 2.0.5-beta - UNRELEASED
YARN-542. Changed the default global AM max-attempts value to be not one.
(Zhijie Shen via vinodkv)
YARN-583. Moved application level local resources to be localized under the
filecache sub-directory under application directory. (Omkar Vinit Joshi via
vinodkv)
YARN-581. Added a test to verify that app delegation tokens are restored
after RM restart. (Jian He via vinodkv)
OPTIMIZATIONS
BUG FIXES
@ -277,6 +287,9 @@ Release 2.0.5-beta - UNRELEASED
YARN-594. Update test and add comments in YARN-534 (Jian He via bikas)
YARN-549. YarnClient.submitApplication should wait for application to be
accepted by the RM (Zhijie Shen via bikas)
Release 2.0.4-alpha - UNRELEASED
INCOMPATIBLE CHANGES

View File

@ -37,30 +37,6 @@ public interface ApplicationConstants {
public static final String APPLICATION_CLIENT_SECRET_ENV_NAME =
"AppClientSecretEnv";
/**
* The environment variable for CONTAINER_ID. Set in AppMaster environment
* only
*/
public static final String AM_CONTAINER_ID_ENV = "AM_CONTAINER_ID";
/**
* The environment variable for the NM_HOST. Set in the AppMaster environment
* only
*/
public static final String NM_HOST_ENV = "NM_HOST";
/**
* The environment variable for the NM_PORT. Set in the AppMaster environment
* only
*/
public static final String NM_PORT_ENV = "NM_PORT";
/**
* The environment variable for the NM_HTTP_PORT. Set in the AppMaster environment
* only
*/
public static final String NM_HTTP_PORT_ENV = "NM_HTTP_PORT";
/**
* The environment variable for APP_SUBMIT_TIME. Set in AppMaster environment
* only
@ -70,8 +46,6 @@ public interface ApplicationConstants {
public static final String CONTAINER_TOKEN_FILE_ENV_NAME =
UserGroupInformation.HADOOP_TOKEN_FILE_LOCATION;
public static final String LOCAL_DIR_ENV = "YARN_LOCAL_DIRS";
/**
* The environmental variable for APPLICATION_WEB_PROXY_BASE. Set in
* ApplicationMaster's environment only. This states that for all non-relative
@ -177,7 +151,37 @@ public interface ApplicationConstants {
/**
* $HADOOP_YARN_HOME
*/
HADOOP_YARN_HOME("HADOOP_YARN_HOME");
HADOOP_YARN_HOME("HADOOP_YARN_HOME"),
/**
* $CONTAINER_ID
* Final, exported by NodeManager and non-modifiable by users.
*/
CONTAINER_ID("CONTAINER_ID"),
/**
* $NM_HOST
* Final, exported by NodeManager and non-modifiable by users.
*/
NM_HOST("NM_HOST"),
/**
* $NM_HTTP_PORT
* Final, exported by NodeManager and non-modifiable by users.
*/
NM_HTTP_PORT("NM_HTTP_PORT"),
/**
* $NM_PORT
* Final, exported by NodeManager and non-modifiable by users.
*/
NM_PORT("NM_PORT"),
/**
* $LOCAL_DIRS
* Final, exported by NodeManager and non-modifiable by users.
*/
LOCAL_DIRS("LOCAL_DIRS");
private final String variable;
private Environment(String variable) {
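
For code running inside a container, each of these new variables can be read straight from the process environment. A minimal sketch, assuming the process was launched by a NodeManager that exported them (the parsing of LOCAL_DIRS mirrors the YarnChild change elsewhere in this commit):

    // All of these are exported by the NodeManager and non-modifiable by users.
    String containerIdStr = System.getenv(Environment.CONTAINER_ID.name());
    String nmHost = System.getenv(Environment.NM_HOST.name());
    String nmPort = System.getenv(Environment.NM_PORT.name());
    String nmHttpPort = System.getenv(Environment.NM_HTTP_PORT.name());
    // LOCAL_DIRS is a comma-separated list; split it as YarnChild does.
    String[] localDirs = StringUtils.getTrimmedStrings(
        System.getenv(Environment.LOCAL_DIRS.name()));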

View File

@ -96,7 +96,9 @@ public interface ClientRMProtocol {
*
* <p>Currently the <code>ResourceManager</code> sends an immediate (empty)
* {@link SubmitApplicationResponse} on accepting the submission and throws
* an exception if it rejects the submission.</p>
* an exception if it rejects the submission. However, this call needs to be
* followed by {@link #getApplicationReport(GetApplicationReportRequest)}
* to make sure that the application gets properly submitted.</p>
*
* <p> In secure mode, the <code>ResourceManager</code> verifies access to
* queues etc. before accepting the application submission.</p>
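
A hedged sketch of the follow-up polling this javadoc now recommends; the helper name is hypothetical, but every record type and call appears elsewhere in this commit ("rmClient" is an assumed ClientRMProtocol proxy):

    private void waitUntilAccepted(ClientRMProtocol rmClient, ApplicationId appId)
        throws YarnRemoteException, InterruptedException {
      GetApplicationReportRequest request =
          Records.newRecord(GetApplicationReportRequest.class);
      request.setApplicationId(appId);
      while (true) {
        YarnApplicationState state = rmClient.getApplicationReport(request)
            .getApplicationReport().getYarnApplicationState();
        if (state != YarnApplicationState.NEW
            && state != YarnApplicationState.NEW_SAVING) {
          return; // the submission has been accepted (or has moved further along)
        }
        Thread.sleep(1000); // see yarn.client.app-submission.poll-interval
      }
    }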

View File

@ -45,6 +45,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.AMRMProtocol;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.ContainerExitStatus;
import org.apache.hadoop.yarn.api.ContainerManager;
@ -320,7 +321,7 @@ public class ApplicationMaster {
Map<String, String> envs = System.getenv();
if (!envs.containsKey(ApplicationConstants.AM_CONTAINER_ID_ENV)) {
if (!envs.containsKey(Environment.CONTAINER_ID.name())) {
if (cliParser.hasOption("app_attempt_id")) {
String appIdStr = cliParser.getOptionValue("app_attempt_id", "");
appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr);
@ -330,7 +331,7 @@ public class ApplicationMaster {
}
} else {
ContainerId containerId = ConverterUtils.toContainerId(envs
.get(ApplicationConstants.AM_CONTAINER_ID_ENV));
.get(Environment.CONTAINER_ID.name()));
appAttemptID = containerId.getApplicationAttemptId();
}
@ -338,16 +339,16 @@ public class ApplicationMaster {
throw new RuntimeException(ApplicationConstants.APP_SUBMIT_TIME_ENV
+ " not set in the environment");
}
if (!envs.containsKey(ApplicationConstants.NM_HOST_ENV)) {
throw new RuntimeException(ApplicationConstants.NM_HOST_ENV
if (!envs.containsKey(Environment.NM_HOST.name())) {
throw new RuntimeException(Environment.NM_HOST.name()
+ " not set in the environment");
}
if (!envs.containsKey(ApplicationConstants.NM_HTTP_PORT_ENV)) {
throw new RuntimeException(ApplicationConstants.NM_HTTP_PORT_ENV
if (!envs.containsKey(Environment.NM_HTTP_PORT.name())) {
throw new RuntimeException(Environment.NM_HTTP_PORT.name()
+ " not set in the environment");
}
if (!envs.containsKey(ApplicationConstants.NM_PORT_ENV)) {
throw new RuntimeException(ApplicationConstants.NM_PORT_ENV
if (!envs.containsKey(Environment.NM_PORT.name())) {
throw new RuntimeException(Environment.NM_PORT.name()
+ " not set in the environment");
}

View File

@ -37,6 +37,7 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
@ -58,7 +59,7 @@ import org.apache.hadoop.yarn.util.Records;
* creates a new application on the RM and negotiates a new attempt id. Then it
* waits for the RM app state to be YarnApplicationState.ACCEPTED after
* which it spawns the AM in another process and passes it the container id via
* env variable ApplicationConstants.AM_CONTAINER_ID_ENV. The AM can be in any
* env variable Environment.CONTAINER_ID. The AM can be in any
* language. The AM can register with the RM using the attempt id obtained
* from the container id and proceed as normal.
* The client redirects app stdout and stderr to its own stdout and
@ -190,10 +191,11 @@ public class UnmanagedAMLauncher {
containerId.setId(0);
String hostname = InetAddress.getLocalHost().getHostName();
envAMList.add(ApplicationConstants.AM_CONTAINER_ID_ENV + "=" + containerId);
envAMList.add(ApplicationConstants.NM_HOST_ENV + "=" + hostname);
envAMList.add(ApplicationConstants.NM_HTTP_PORT_ENV + "=0");
envAMList.add(ApplicationConstants.NM_PORT_ENV + "=0");
envAMList.add(Environment.CONTAINER_ID.name() + "=" + containerId);
envAMList.add(Environment.NM_HOST.name() + "=" + hostname);
envAMList.add(Environment.NM_HTTP_PORT.name() + "=0");
envAMList.add(Environment.NM_PORT.name() + "=0");
envAMList.add(Environment.LOCAL_DIRS.name() + "=/tmp");
envAMList.add(ApplicationConstants.APP_SUBMIT_TIME_ENV + "="
+ System.currentTimeMillis());
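
On the AM side, the attempt id is recovered from the same variable. A hedged sketch of the consumer, mirroring the ApplicationMaster and MRAppMaster changes elsewhere in this commit:

    // Recover the attempt id from the CONTAINER_ID variable set above.
    ContainerId containerId = ConverterUtils.toContainerId(
        System.getenv(Environment.CONTAINER_ID.name()));
    ApplicationAttemptId appAttemptId = containerId.getApplicationAttemptId();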

View File

@ -63,7 +63,9 @@ public interface YarnClient extends Service {
/**
* <p>
* Submit a new application to <code>YARN.</code>
* Submit a new application to <code>YARN</code>. It is a blocking call that
* will not return an {@link ApplicationId} until the application has been
* successfully submitted to, and accepted by, the ResourceManager.
* </p>
*
* @param appContext
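
A hedged usage sketch of the new blocking behavior; "appContext" is an assumed, fully populated ApplicationSubmissionContext, and the wrapper method is hypothetical:

    ApplicationId submitAndWait(ApplicationSubmissionContext appContext)
        throws YarnRemoteException {
      YarnClient client = new YarnClientImpl();
      client.init(new YarnConfiguration());
      client.start();
      try {
        // Blocks until the ResourceManager has accepted the submission.
        return client.submitApplication(appContext);
      } finally {
        client.stop();
      }
    }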

View File

@ -53,9 +53,11 @@ import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.api.records.NodeReport;
import org.apache.hadoop.yarn.api.records.QueueInfo;
import org.apache.hadoop.yarn.api.records.QueueUserACLInfo;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.service.AbstractService;
import org.apache.hadoop.yarn.util.Records;
@ -68,6 +70,7 @@ public class YarnClientImpl extends AbstractService implements YarnClient {
protected ClientRMProtocol rmClient;
protected InetSocketAddress rmAddress;
protected long statePollIntervalMillis;
private static final String ROOT = "root";
@ -90,6 +93,9 @@ public class YarnClientImpl extends AbstractService implements YarnClient {
if (this.rmAddress == null) {
this.rmAddress = getRmAddress(conf);
}
statePollIntervalMillis = conf.getLong(
YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS,
YarnConfiguration.DEFAULT_YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS);
super.init(conf);
}
@ -131,6 +137,29 @@ public class YarnClientImpl extends AbstractService implements YarnClient {
Records.newRecord(SubmitApplicationRequest.class);
request.setApplicationSubmissionContext(appContext);
rmClient.submitApplication(request);
int pollCount = 0;
while (true) {
YarnApplicationState state =
getApplicationReport(applicationId).getYarnApplicationState();
if (!state.equals(YarnApplicationState.NEW) &&
!state.equals(YarnApplicationState.NEW_SAVING)) {
break;
}
// Notify the client through the log every 10 polls, in case the client
// is blocked here for too long.
if (++pollCount % 10 == 0) {
LOG.info("Application submission is not finished, " +
"submitted application " + applicationId +
" is still in " + state);
}
try {
Thread.sleep(statePollIntervalMillis);
} catch (InterruptedException ie) {
}
}
LOG.info("Submitted application " + applicationId + " to ResourceManager"
+ " at " + rmAddress);
return applicationId;

View File

@ -18,10 +18,25 @@
package org.apache.hadoop.yarn.client;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.times;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.client.YarnClient;
import org.apache.hadoop.yarn.client.YarnClientImpl;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportRequest;
import org.apache.hadoop.yarn.api.protocolrecords.GetApplicationReportResponse;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Test;
public class TestYarnClient {
@ -43,4 +58,76 @@ public class TestYarnClient {
client.start();
client.stop();
}
@Test (timeout = 30000)
public void testSubmitApplication() {
Configuration conf = new Configuration();
conf.setLong(YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS,
100); // speed up tests
final YarnClient client = new MockYarnClient();
client.init(conf);
client.start();
YarnApplicationState[] exitStates = new YarnApplicationState[]
{
YarnApplicationState.SUBMITTED,
YarnApplicationState.ACCEPTED,
YarnApplicationState.RUNNING,
YarnApplicationState.FINISHED,
YarnApplicationState.FAILED,
YarnApplicationState.KILLED
};
for (int i = 0; i < exitStates.length; ++i) {
ApplicationSubmissionContext context =
mock(ApplicationSubmissionContext.class);
ApplicationId applicationId = Records.newRecord(ApplicationId.class);
applicationId.setClusterTimestamp(System.currentTimeMillis());
applicationId.setId(i);
when(context.getApplicationId()).thenReturn(applicationId);
((MockYarnClient) client).setYarnApplicationState(exitStates[i]);
try {
client.submitApplication(context);
} catch (YarnRemoteException e) {
Assert.fail("Exception is not expected.");
}
verify(((MockYarnClient) client).mockReport,times(4 * i + 4))
.getYarnApplicationState();
}
client.stop();
}
private static class MockYarnClient extends YarnClientImpl {
private ApplicationReport mockReport;
public MockYarnClient() {
super();
}
@Override
public void start() {
rmClient = mock(ClientRMProtocol.class);
GetApplicationReportResponse mockResponse =
mock(GetApplicationReportResponse.class);
mockReport = mock(ApplicationReport.class);
try{
when(rmClient.getApplicationReport(any(
GetApplicationReportRequest.class))).thenReturn(mockResponse);
} catch (YarnRemoteException e) {
Assert.fail("Exception is not expected.");
}
when(mockResponse.getApplicationReport()).thenReturn(mockReport);
}
@Override
public void stop() {
}
public void setYarnApplicationState(YarnApplicationState state) {
when(mockReport.getYarnApplicationState()).thenReturn(
YarnApplicationState.NEW, YarnApplicationState.NEW_SAVING,
YarnApplicationState.NEW_SAVING, state);
}
}
}

View File

@ -692,6 +692,19 @@ public class YarnConfiguration extends Configuration {
*/
public static boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false;
////////////////////////////////
// Other Configs
////////////////////////////////
/**
* The interval, in milliseconds, at which the yarn client queries the
* application state after application submission.
*/
public static final String YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS =
YARN_PREFIX + "client.app-submission.poll-interval";
public static final long DEFAULT_YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS =
1000;
public YarnConfiguration() {
super();
}
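
A minimal sketch of overriding the new knob: clients that want faster feedback can shorten the poll interval, as the new TestYarnClient in this commit does (100ms instead of the 1000ms default):

    Configuration conf = new Configuration();
    conf.setLong(
        YarnConfiguration.YARN_CLIENT_APP_SUBMISSION_POLL_INTERVAL_MS, 100);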

View File

@ -708,4 +708,12 @@
<value>$HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/share/hadoop/common/*,$HADOOP_COMMON_HOME/share/hadoop/common/lib/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/*,$HADOOP_HDFS_HOME/share/hadoop/hdfs/lib/*,$HADOOP_YARN_HOME/share/hadoop/yarn/*,$HADOOP_YARN_HOME/share/hadoop/yarn/lib/*</value>
</property>
<!-- Other configuration -->
<property>
<description>The interval, in milliseconds, at which the yarn client
queries the application state after application submission.</description>
<name>yarn.client.app-submission.poll-interval</name>
<value>1000</value>
</property>
</configuration>

View File

@ -113,13 +113,13 @@ public class DefaultContainerExecutor extends ContainerExecutor {
List<String> localDirs, List<String> logDirs) throws IOException {
FsPermission dirPerm = new FsPermission(APPDIR_PERM);
ContainerId containerId = container.getContainerID();
ContainerId containerId = container.getContainer().getId();
// create container dirs on all disks
String containerIdStr = ConverterUtils.toString(containerId);
String appIdStr =
ConverterUtils.toString(
container.getContainerID().getApplicationAttemptId().
containerId.getApplicationAttemptId().
getApplicationId());
for (String sLocalDir : localDirs) {
Path usersdir = new Path(sLocalDir, ContainerLocalizer.USERCACHE);

View File

@ -216,11 +216,11 @@ public class LinuxContainerExecutor extends ContainerExecutor {
String user, String appId, Path containerWorkDir,
List<String> localDirs, List<String> logDirs) throws IOException {
ContainerId containerId = container.getContainerID();
ContainerId containerId = container.getContainer().getId();
String containerIdStr = ConverterUtils.toString(containerId);
resourcesHandler.preExecute(containerId,
container.getResource());
container.getContainer().getResource());
String resourcesOptions = resourcesHandler.getResourcesOption(
containerId);

View File

@ -36,8 +36,8 @@ public class ApplicationContainerInitEvent extends ApplicationEvent {
final Container container;
public ApplicationContainerInitEvent(Container container) {
super(container.getContainerID().getApplicationAttemptId().getApplicationId(),
ApplicationEventType.INIT_CONTAINER);
super(container.getContainer().getId().getApplicationAttemptId()
.getApplicationId(), ApplicationEventType.INIT_CONTAINER);
this.container = container;
}

View File

@ -274,14 +274,14 @@ public class ApplicationImpl implements Application {
ApplicationContainerInitEvent initEvent =
(ApplicationContainerInitEvent) event;
Container container = initEvent.getContainer();
app.containers.put(container.getContainerID(), container);
LOG.info("Adding " + container.getContainerID()
app.containers.put(container.getContainer().getId(), container);
LOG.info("Adding " + container.getContainer().getId()
+ " to application " + app.toString());
switch (app.getApplicationState()) {
case RUNNING:
app.dispatcher.getEventHandler().handle(new ContainerInitEvent(
container.getContainerID()));
container.getContainer().getId()));
break;
case INITING:
case NEW:
@ -302,7 +302,7 @@ public class ApplicationImpl implements Application {
// Start all the containers waiting for ApplicationInit
for (Container container : app.containers.values()) {
app.dispatcher.getEventHandler().handle(new ContainerInitEvent(
container.getContainerID()));
container.getContainer().getId()));
}
}
}

View File

@ -25,12 +25,11 @@ import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.EventHandler;
public interface Container extends EventHandler<ContainerEvent> {
org.apache.hadoop.yarn.api.records.ContainerId getContainerID();
org.apache.hadoop.yarn.api.records.Container getContainer();
String getUser();
@ -46,5 +45,4 @@ public interface Container extends EventHandler<ContainerEvent> {
String toString();
Resource getResource();
}

View File

@ -42,7 +42,6 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
@ -312,16 +311,6 @@ public class ContainerImpl implements Container {
}
}
@Override
public ContainerId getContainerID() {
this.readLock.lock();
try {
return this.container.getId();
} finally {
this.readLock.unlock();
}
}
@Override
public String getUser() {
this.readLock.lock();
@ -385,10 +374,10 @@ public class ContainerImpl implements Container {
}
@Override
public Resource getResource() {
public org.apache.hadoop.yarn.api.records.Container getContainer() {
this.readLock.lock();
try {
return this.container.getResource();
return this.container;
} finally {
this.readLock.unlock();
}

View File

@ -118,7 +118,7 @@ public class ContainerLaunch implements Callable<Integer> {
final ContainerLaunchContext launchContext = container.getLaunchContext();
final Map<Path,List<String>> localResources =
container.getLocalizedResources();
ContainerId containerID = container.getContainerID();
ContainerId containerID = container.getContainer().getId();
String containerIdStr = ConverterUtils.toString(containerID);
final String user = launchContext.getUser();
final List<String> command = launchContext.getCommands();
@ -299,7 +299,7 @@ public class ContainerLaunch implements Callable<Integer> {
* @throws IOException
*/
public void cleanupContainer() throws IOException {
ContainerId containerId = container.getContainerID();
ContainerId containerId = container.getContainer().getId();
String containerIdStr = ConverterUtils.toString(containerId);
LOG.info("Cleaning up container " + containerIdStr);
@ -370,7 +370,7 @@ public class ContainerLaunch implements Callable<Integer> {
*/
private String getContainerPid(Path pidFilePath) throws Exception {
String containerIdStr =
ConverterUtils.toString(container.getContainerID());
ConverterUtils.toString(container.getContainer().getId());
String processId = null;
LOG.debug("Accessing pid for container " + containerIdStr
+ " from pid file " + pidFilePath);
@ -547,6 +547,21 @@ public class ContainerLaunch implements Callable<Integer> {
* Non-modifiable environment variables
*/
environment.put(Environment.CONTAINER_ID.name(), container
.getContainer().getId().toString());
environment.put(Environment.NM_PORT.name(),
String.valueOf(container.getContainer().getNodeId().getPort()));
environment.put(Environment.NM_HOST.name(), container.getContainer()
.getNodeId().getHost());
environment.put(Environment.NM_HTTP_PORT.name(), container.getContainer()
.getNodeHttpAddress().split(":")[1]);
environment.put(Environment.LOCAL_DIRS.name(),
StringUtils.join(",", appDirs));
putEnvIfNotNull(environment, Environment.USER.name(), container.getUser());
putEnvIfNotNull(environment,
@ -566,11 +581,6 @@ public class ContainerLaunch implements Callable<Integer> {
Environment.HADOOP_CONF_DIR.name(),
System.getenv(Environment.HADOOP_CONF_DIR.name())
);
putEnvIfNotNull(environment,
ApplicationConstants.LOCAL_DIR_ENV,
StringUtils.join(",", appDirs)
);
if (!Shell.WINDOWS) {
environment.put("JVM_PID", "$$");

View File

@ -111,7 +111,7 @@ public class ContainersLauncher extends AbstractService
public void handle(ContainersLauncherEvent event) {
// TODO: ContainersLauncher launches containers one by one!!
Container container = event.getContainer();
ContainerId containerId = container.getContainerID();
ContainerId containerId = container.getContainer().getId();
switch (event.getType()) {
case LAUNCH_CONTAINER:
Application app =

View File

@ -27,6 +27,7 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URISyntaxException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.EnumSet;
import java.util.HashMap;
@ -46,7 +47,6 @@ import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -65,6 +65,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -358,13 +359,15 @@ public class ResourceLocalizationService extends CompositeService
ContainerLocalizationRequestEvent rsrcReqs) {
Container c = rsrcReqs.getContainer();
LocalizerContext ctxt = new LocalizerContext(
c.getUser(), c.getContainerID(), c.getCredentials());
c.getUser(), c.getContainer().getId(), c.getCredentials());
Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs =
rsrcReqs.getRequestedResources();
for (Map.Entry<LocalResourceVisibility, Collection<LocalResourceRequest>> e :
rsrcs.entrySet()) {
LocalResourcesTracker tracker = getLocalResourcesTracker(e.getKey(), c.getUser(),
c.getContainerID().getApplicationAttemptId().getApplicationId());
LocalResourcesTracker tracker =
getLocalResourcesTracker(e.getKey(), c.getUser(),
c.getContainer().getId().getApplicationAttemptId()
.getApplicationId());
for (LocalResourceRequest req : e.getValue()) {
tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt));
}
@ -393,19 +396,21 @@ public class ResourceLocalizationService extends CompositeService
for (Map.Entry<LocalResourceVisibility, Collection<LocalResourceRequest>> e :
rsrcs.entrySet()) {
LocalResourcesTracker tracker = getLocalResourcesTracker(e.getKey(), c.getUser(),
c.getContainerID().getApplicationAttemptId().getApplicationId());
c.getContainer().getId().getApplicationAttemptId()
.getApplicationId());
for (LocalResourceRequest req : e.getValue()) {
tracker.handle(new ResourceReleaseEvent(req, c.getContainerID()));
tracker.handle(new ResourceReleaseEvent(req,
c.getContainer().getId()));
}
}
String locId = ConverterUtils.toString(c.getContainerID());
String locId = ConverterUtils.toString(c.getContainer().getId());
localizerTracker.cleanupPrivLocalizers(locId);
// Delete the container directories
String userName = c.getUser();
String containerIDStr = c.toString();
String appIDStr = ConverterUtils.toString(
c.getContainerID().getApplicationAttemptId().getApplicationId());
c.getContainer().getId().getApplicationAttemptId().getApplicationId());
for (String localDir : dirsHandler.getLocalDirs()) {
// Delete the user-owned container-dir
@ -424,8 +429,9 @@ public class ResourceLocalizationService extends CompositeService
delService.delete(null, containerSysDir, new Path[] {});
}
dispatcher.getEventHandler().handle(new ContainerEvent(c.getContainerID(),
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
dispatcher.getEventHandler().handle(
new ContainerEvent(c.getContainer().getId(),
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
}
@ -481,18 +487,15 @@ public class ResourceLocalizationService extends CompositeService
}
private String getUserFileCachePath(String user) {
String path =
"." + Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR
+ user + Path.SEPARATOR + ContainerLocalizer.FILECACHE;
return path;
return StringUtils.join(Path.SEPARATOR, Arrays.asList(".",
ContainerLocalizer.USERCACHE, user, ContainerLocalizer.FILECACHE));
}
private String getUserAppCachePath(String user, String appId) {
String path =
"." + Path.SEPARATOR + ContainerLocalizer.USERCACHE + Path.SEPARATOR
+ user + Path.SEPARATOR + ContainerLocalizer.APPCACHE
+ Path.SEPARATOR + appId;
return path;
private String getAppFileCachePath(String user, String appId) {
return StringUtils.join(Path.SEPARATOR, Arrays.asList(".",
ContainerLocalizer.USERCACHE, user, ContainerLocalizer.APPCACHE, appId,
ContainerLocalizer.FILECACHE));
}
@VisibleForTesting
@ -942,7 +945,7 @@ public class ResourceLocalizationService extends CompositeService
if (vis == LocalResourceVisibility.PRIVATE) {// PRIVATE Only
cacheDirectory = getUserFileCachePath(user);
} else {// APPLICATION ONLY
cacheDirectory = getUserAppCachePath(user, appId.toString());
cacheDirectory = getAppFileCachePath(user, appId.toString());
}
Path dirPath =
dirsHandler.getLocalPathForWrite(cacheDirectory,

View File

@ -60,7 +60,7 @@ public class ContainerInfo {
public ContainerInfo(final Context nmContext, final Container container,
String requestUri, String pathPrefix) {
this.id = container.getContainerID().toString();
this.id = container.getContainer().getId().toString();
this.nodeId = nmContext.getNodeId().toString();
ContainerStatus containerData = container.cloneAndGetContainerStatus();
this.exitCode = containerData.getExitStatus();
@ -74,7 +74,7 @@ public class ContainerInfo {
}
this.user = container.getUser();
Resource res = container.getResource();
Resource res = container.getContainer().getResource();
if (res != null) {
this.totalMemoryNeededMB = res.getMemory();
}

View File

@ -88,10 +88,10 @@ public class DummyContainerManager extends ContainerManagerImpl {
.getRequestedResources().values()) {
for (LocalResourceRequest req : rc) {
LOG.info("DEBUG: " + req + ":"
+ rsrcReqs.getContainer().getContainerID());
+ rsrcReqs.getContainer().getContainer().getId());
dispatcher.getEventHandler().handle(
new ContainerResourceLocalizedEvent(rsrcReqs.getContainer()
.getContainerID(), req, new Path("file:///local"
.getContainer().getId(), req, new Path("file:///local"
+ req.getPath().toUri().getPath())));
}
}
@ -101,7 +101,7 @@ public class DummyContainerManager extends ContainerManagerImpl {
((ContainerLocalizationEvent) event).getContainer();
// TODO: delete the container dir
this.dispatcher.getEventHandler().handle(
new ContainerEvent(container.getContainerID(),
new ContainerEvent(container.getContainer().getId(),
ContainerEventType.CONTAINER_RESOURCES_CLEANEDUP));
break;
case DESTROY_APPLICATION_RESOURCES:
@ -130,7 +130,7 @@ public class DummyContainerManager extends ContainerManagerImpl {
@Override
public void handle(ContainersLauncherEvent event) {
Container container = event.getContainer();
ContainerId containerId = container.getContainerID();
ContainerId containerId = container.getContainer().getId();
switch (event.getType()) {
case LAUNCH_CONTAINER:
dispatcher.getEventHandler().handle(

View File

@ -186,7 +186,10 @@ public class TestLinuxContainerExecutor {
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String,String>();
when(container.getContainerID()).thenReturn(cId);
org.apache.hadoop.yarn.api.records.Container containerAPI =
mock(org.apache.hadoop.yarn.api.records.Container.class);
when(container.getContainer()).thenReturn(containerAPI);
when(container.getContainer().getId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(context.getEnvironment()).thenReturn(env);

View File

@ -107,7 +107,10 @@ public class TestLinuxContainerExecutorWithMocks {
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String,String>();
when(container.getContainerID()).thenReturn(cId);
org.apache.hadoop.yarn.api.records.Container containerAPI =
mock(org.apache.hadoop.yarn.api.records.Container.class);
when(container.getContainer()).thenReturn(containerAPI);
when(container.getContainer().getId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.toString()).thenReturn(containerId);
@ -225,7 +228,10 @@ public class TestLinuxContainerExecutorWithMocks {
ContainerLaunchContext context = mock(ContainerLaunchContext.class);
HashMap<String, String> env = new HashMap<String, String>();
when(container.getContainerID()).thenReturn(cId);
org.apache.hadoop.yarn.api.records.Container containerAPI =
mock(org.apache.hadoop.yarn.api.records.Container.class);
when(container.getContainer()).thenReturn(containerAPI);
when(container.getContainer().getId()).thenReturn(cId);
when(container.getLaunchContext()).thenReturn(context);
when(cId.toString()).thenReturn(containerId);

View File

@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -178,6 +179,9 @@ public class TestNodeManagerShutdown {
Container mockContainer = mock(Container.class);
when(mockContainer.getId()).thenReturn(cId);
NodeId nodeId = BuilderUtils.newNodeId("localhost", 1234);
when(mockContainer.getNodeId()).thenReturn(nodeId);
when(mockContainer.getNodeHttpAddress()).thenReturn("localhost:12345");
containerLaunchContext.setUser(user);
URL localResourceUri =

View File

@ -150,6 +150,10 @@ public class TestContainerManager extends BaseContainerManagerTest {
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getResource()).thenReturn(
BuilderUtils.newResource(512, 1));
when(mockContainer.getNodeId()).thenReturn(context.getNodeId());
when(mockContainer.getNodeHttpAddress()).thenReturn(
context.getNodeId().getHost() + ":12345");
StartContainerRequest startRequest =
recordFactory.newRecordInstance(StartContainerRequest.class);
@ -245,6 +249,10 @@ public class TestContainerManager extends BaseContainerManagerTest {
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getResource()).thenReturn(
BuilderUtils.newResource(100, 1)); // MB
when(mockContainer.getNodeId()).thenReturn(context.getNodeId());
when(mockContainer.getNodeHttpAddress()).thenReturn(
context.getNodeId().getHost() + ":12345");
StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
startRequest.setContainerLaunchContext(containerLaunchContext);
startRequest.setContainer(mockContainer);
@ -352,7 +360,9 @@ public class TestContainerManager extends BaseContainerManagerTest {
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getResource()).thenReturn(
BuilderUtils.newResource(100, 1)); // MB
when(mockContainer.getNodeId()).thenReturn(context.getNodeId());
when(mockContainer.getNodeHttpAddress()).thenReturn(
context.getNodeId().getHost() + ":12345");
StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class);
startRequest.setContainerLaunchContext(containerLaunchContext);
startRequest.setContainer(mockContainer);
@ -444,6 +454,9 @@ public class TestContainerManager extends BaseContainerManagerTest {
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getResource()).thenReturn(
BuilderUtils.newResource(100, 1));
when(mockContainer.getNodeId()).thenReturn(context.getNodeId());
when(mockContainer.getNodeHttpAddress()).thenReturn(
context.getNodeId().getHost() + ":12345");
// containerLaunchContext.command = new ArrayList<CharSequence>();

View File

@ -83,7 +83,7 @@ public class TestApplication {
for (int i = 0; i < wa.containers.size(); i++) {
verify(wa.containerBus).handle(
argThat(new ContainerInitMatcher(wa.containers.get(i)
.getContainerID())));
.getContainer().getId())));
}
} finally {
if (wa != null)
@ -108,7 +108,7 @@ public class TestApplication {
assertEquals(ApplicationState.RUNNING, wa.app.getApplicationState());
verify(wa.containerBus).handle(
argThat(new ContainerInitMatcher(wa.containers.get(0)
.getContainerID())));
.getContainer().getId())));
wa.initContainer(1);
wa.initContainer(2);
@ -118,7 +118,7 @@ public class TestApplication {
for (int i = 1; i < wa.containers.size(); i++) {
verify(wa.containerBus).handle(
argThat(new ContainerInitMatcher(wa.containers.get(i)
.getContainerID())));
.getContainer().getId())));
}
} finally {
if (wa != null)
@ -233,7 +233,7 @@ public class TestApplication {
for (int i = 1; i < wa.containers.size(); i++) {
verify(wa.containerBus).handle(
argThat(new ContainerKillMatcher(wa.containers.get(i)
.getContainerID())));
.getContainer().getId())));
}
wa.containerFinished(1);
@ -354,7 +354,7 @@ public class TestApplication {
verify(wa.containerBus).handle(
argThat(new ContainerKillMatcher(wa.containers.get(0)
.getContainerID())));
.getContainer().getId())));
assertEquals(ApplicationState.FINISHING_CONTAINERS_WAIT,
wa.app.getApplicationState());
@ -487,7 +487,7 @@ public class TestApplication {
public void containerFinished(int containerNum) {
app.handle(new ApplicationContainerFinishedEvent(containers.get(
containerNum).getContainerID()));
containerNum).getContainer().getId()));
drainDispatcherEvents();
}
@ -514,7 +514,10 @@ public class TestApplication {
BuilderUtils.newApplicationAttemptId(appId, 1);
ContainerId cId = BuilderUtils.newContainerId(appAttemptId, containerId);
Container c = mock(Container.class);
when(c.getContainerID()).thenReturn(cId);
org.apache.hadoop.yarn.api.records.Container containerAPI =
mock(org.apache.hadoop.yarn.api.records.Container.class);
when(c.getContainer()).thenReturn(containerAPI);
when(c.getContainer().getId()).thenReturn(cId);
ContainerLaunchContext launchContext = mock(ContainerLaunchContext.class);
when(c.getLaunchContext()).thenReturn(launchContext);
when(launchContext.getApplicationACLs()).thenReturn(

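The two-level mock built here (an NM-side Container whose getContainer() returns a mocked API record) is the pattern this patch applies throughout: every former getContainerID() call becomes a chain through the record. A sketch of the before/after shape as inferred from the call sites, since the interface itself is not shown in this diff:

// Before (inferred): ContainerId id = container.getContainerID();
// After: callers drill into the YARN API record for the id and node identity.
org.apache.hadoop.yarn.api.records.Container apiContainer =
    container.getContainer();
ContainerId id = apiContainer.getId();
NodeId nodeId = apiContainer.getNodeId();               // node host/port record
String httpAddress = apiContainer.getNodeHttpAddress(); // "host:port" string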
View File

@ -376,7 +376,7 @@ public class TestContainer {
public boolean matches(Object o) {
ContainersLauncherEvent evt = (ContainersLauncherEvent) o;
return evt.getType() == ContainersLauncherEventType.LAUNCH_CONTAINER
&& wcf.cId == evt.getContainer().getContainerID();
&& wcf.cId == evt.getContainer().getContainer().getId();
}
};
verify(wc.launcherBus).handle(argThat(matchesLaunchReq));
@ -639,7 +639,7 @@ public class TestContainer {
Path p = new Path(cache, rsrc.getKey());
localPaths.put(p, Arrays.asList(rsrc.getKey()));
// rsrc copied to p
c.handle(new ContainerResourceLocalizedEvent(c.getContainerID(),
c.handle(new ContainerResourceLocalizedEvent(c.getContainer().getId(),
req, p));
}
drainDispatcherEvents();
@ -662,7 +662,8 @@ public class TestContainer {
LocalResource rsrc = localResources.get(rsrcKey);
LocalResourceRequest req = new LocalResourceRequest(rsrc);
Exception e = new Exception("Fake localization error");
c.handle(new ContainerResourceFailedEvent(c.getContainerID(), req, e));
c.handle(new ContainerResourceFailedEvent(c.getContainer()
.getId(), req, e));
drainDispatcherEvents();
}
@ -677,7 +678,7 @@ public class TestContainer {
++counter;
LocalResourceRequest req = new LocalResourceRequest(rsrc.getValue());
Exception e = new Exception("Fake localization error");
c.handle(new ContainerResourceFailedEvent(c.getContainerID(),
c.handle(new ContainerResourceFailedEvent(c.getContainer().getId(),
req, e));
}
drainDispatcherEvents();

View File

@ -37,6 +37,7 @@ import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnsupportedFileSystemException;
import org.apache.hadoop.util.Shell;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
@ -51,14 +52,13 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainerLaunch;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.ContainerLocalizer;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
@ -150,50 +150,17 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
}
}
}
// this is a dirty hack - but should be ok for a unittest.
@SuppressWarnings({ "rawtypes", "unchecked" })
public static void setNewEnvironmentHack(Map<String, String> newenv) throws Exception {
try {
Class<?> cl = Class.forName("java.lang.ProcessEnvironment");
Field field = cl.getDeclaredField("theEnvironment");
field.setAccessible(true);
Map<String, String> env = (Map<String, String>)field.get(null);
env.clear();
env.putAll(newenv);
Field ciField = cl.getDeclaredField("theCaseInsensitiveEnvironment");
ciField.setAccessible(true);
Map<String, String> cienv = (Map<String, String>)ciField.get(null);
cienv.clear();
cienv.putAll(newenv);
} catch (NoSuchFieldException e) {
Class[] classes = Collections.class.getDeclaredClasses();
Map<String, String> env = System.getenv();
for (Class cl : classes) {
if ("java.util.Collections$UnmodifiableMap".equals(cl.getName())) {
Field field = cl.getDeclaredField("m");
field.setAccessible(true);
Object obj = field.get(env);
Map<String, String> map = (Map<String, String>) obj;
map.clear();
map.putAll(newenv);
}
}
}
}
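// Editorial sketch, not part of the patch: the helper deleted above rewrote the
// JVM's cached environment reflectively through java.lang.ProcessEnvironment,
// which depends on JDK internals and can break on other runtimes. The rewritten
// test below stubs the mock Container and asserts what sanitizeEnv forwards
// instead. Where a test truly needs a custom environment, handing one to a
// child process is the portable route (Unix form shown; cmd /c on Windows):
private static Process runWithEnv(File script, String var, String value)
    throws IOException {
  ProcessBuilder pb = new ProcessBuilder("/bin/sh", script.getPath());
  pb.environment().put(var, value); // override is visible only to the child
  return pb.start();
}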
/**
* See if environment variable is forwarded using sanitizeEnv.
* @throws Exception
*/
@Test
@Test (timeout = 5000)
public void testContainerEnvVariables() throws Exception {
containerManager.start();
Map<String, String> envWithDummy = new HashMap<String, String>();
envWithDummy.putAll(System.getenv());
envWithDummy.put(Environment.MALLOC_ARENA_MAX.name(), "99");
setNewEnvironmentHack(envWithDummy);
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
Container mockContainer = mock(Container.class);
// ////// Construct the Container-id
@ -207,34 +174,54 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
ContainerId cId =
recordFactory.newRecordInstance(ContainerId.class);
cId.setApplicationAttemptId(appAttemptId);
String malloc = System.getenv(Environment.MALLOC_ARENA_MAX.name());
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getNodeId()).thenReturn(context.getNodeId());
when(mockContainer.getNodeHttpAddress()).thenReturn(
context.getNodeId().getHost() + ":12345");
Map<String, String> userSetEnv = new HashMap<String, String>();
userSetEnv.put(Environment.CONTAINER_ID.name(), "user_set_container_id");
userSetEnv.put(Environment.NM_HOST.name(), "user_set_NM_HOST");
userSetEnv.put(Environment.NM_PORT.name(), "user_set_NM_PORT");
userSetEnv.put(Environment.NM_HTTP_PORT.name(), "user_set_NM_HTTP_PORT");
userSetEnv.put(Environment.LOCAL_DIRS.name(), "user_set_LOCAL_DIR");
containerLaunchContext.setUser(user);
containerLaunchContext.setEnvironment(userSetEnv);
File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
PrintWriter fileWriter = new PrintWriter(scriptFile);
File processStartFile =
new File(tmpDir, "env_vars.txt").getAbsoluteFile();
if (Shell.WINDOWS) {
fileWriter.println("@echo " + Environment.MALLOC_ARENA_MAX.$() + "> " +
processStartFile);
fileWriter.println("@echo " + cId + ">> " + processStartFile);
fileWriter.println("@echo " + Environment.CONTAINER_ID.$() + "> "
+ processStartFile);
fileWriter.println("@echo " + Environment.NM_HOST.$() + ">> "
+ processStartFile);
fileWriter.println("@echo " + Environment.NM_PORT.$() + ">> "
+ processStartFile);
fileWriter.println("@echo " + Environment.NM_HTTP_PORT.$() + ">> "
+ processStartFile);
fileWriter.println("@echo " + Environment.LOCAL_DIRS.$() + ">> "
+ processStartFile);
fileWriter.println("@ping -n 100 127.0.0.1 >nul");
} else {
fileWriter.write("\numask 0"); // So that start file is readable by the test
fileWriter.write("\necho " + Environment.MALLOC_ARENA_MAX.$() + " > " +
processStartFile);
fileWriter.write("\necho $" + Environment.CONTAINER_ID.name() + " > "
+ processStartFile);
fileWriter.write("\necho $" + Environment.NM_HOST.name() + " >> "
+ processStartFile);
fileWriter.write("\necho $" + Environment.NM_PORT.name() + " >> "
+ processStartFile);
fileWriter.write("\necho $" + Environment.NM_HTTP_PORT.name() + " >> "
+ processStartFile);
fileWriter.write("\necho $" + Environment.LOCAL_DIRS.name() + " >> "
+ processStartFile);
fileWriter.write("\necho $$ >> " + processStartFile);
fileWriter.write("\nexec sleep 100");
}
fileWriter.close();
assert(malloc != null && !"".equals(malloc));
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
when(mockContainer.getId()).thenReturn(cId);
containerLaunchContext.setUser(user);
// upload the script file so that the container can run it
URL resource_alpha =
ConverterUtils.getYarnUrlFromPath(localFS
@ -272,9 +259,40 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
processStartFile.exists());
// Now verify the contents of the file
List<String> localDirs = dirsHandler.getLocalDirs();
List<Path> appDirs = new ArrayList<Path>(localDirs.size());
for (String localDir : localDirs) {
Path usersdir = new Path(localDir, ContainerLocalizer.USERCACHE);
Path userdir = new Path(usersdir, user);
Path appsdir = new Path(userdir, ContainerLocalizer.APPCACHE);
appDirs.add(new Path(appsdir, appId.toString()));
}
BufferedReader reader =
new BufferedReader(new FileReader(processStartFile));
Assert.assertEquals(malloc, reader.readLine());
Assert.assertEquals(cId.toString(), reader.readLine());
Assert.assertEquals(mockContainer.getNodeId().getHost(),
reader.readLine());
Assert.assertEquals(String.valueOf(mockContainer.getNodeId().getPort()),
reader.readLine());
Assert.assertEquals(
String.valueOf(mockContainer.getNodeHttpAddress().split(":")[1]),
reader.readLine());
Assert.assertEquals(StringUtils.join(",", appDirs), reader.readLine());
Assert.assertEquals(cId.toString(), containerLaunchContext
.getEnvironment().get(Environment.CONTAINER_ID.name()));
Assert.assertEquals(mockContainer.getNodeId().getHost(),
containerLaunchContext.getEnvironment()
.get(Environment.NM_HOST.name()));
Assert.assertEquals(String.valueOf(mockContainer.getNodeId().getPort()),
containerLaunchContext.getEnvironment().get(
Environment.NM_PORT.name()));
Assert.assertEquals(
mockContainer.getNodeHttpAddress().split(":")[1],
containerLaunchContext.getEnvironment().get(
Environment.NM_HTTP_PORT.name()));
Assert.assertEquals(StringUtils.join(",", appDirs), containerLaunchContext
.getEnvironment().get(Environment.LOCAL_DIRS.name()));
// Get the pid of the process
String pid = reader.readLine().trim();
// No more lines
@ -354,6 +372,9 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
ContainerLaunchContext containerLaunchContext =
recordFactory.newRecordInstance(ContainerLaunchContext.class);
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getNodeId()).thenReturn(context.getNodeId());
when(mockContainer.getNodeHttpAddress()).thenReturn(
context.getNodeId().getHost() + ":12345");
containerLaunchContext.setUser(user);
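The Windows/Unix script blocks earlier in this file repeat the same echo-to-file pattern once per variable. A hypothetical helper, not part of the patch, could distill it; it assumes java.io.File, java.io.PrintWriter, java.io.IOException, java.util.List, and org.apache.hadoop.util.Shell, all already imported here:

private static void writeEnvDumpScript(File script, File outFile,
    List<String> varNames) throws IOException {
  PrintWriter w = new PrintWriter(script);
  try {
    String redirect = " > ";   // first write truncates outFile
    for (String name : varNames) {
      if (Shell.WINDOWS) {
        w.println("@echo %" + name + "%" + redirect + outFile);
      } else {
        w.println("echo $" + name + redirect + outFile);
      }
      redirect = " >> ";       // subsequent writes append
    }
    if (!Shell.WINDOWS) {
      w.println("exec sleep 100"); // keep the container alive for the test
    }
  } finally {
    w.close();
  }
}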

View File

@ -42,6 +42,7 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.EnumSet;
@ -72,6 +73,7 @@ import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
@ -87,6 +89,7 @@ import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl;
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.DeletionService;
import org.apache.hadoop.yarn.server.nodemanager.LocalDirsHandlerService;
import org.apache.hadoop.yarn.server.nodemanager.api.ResourceLocalizationSpec;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalResourceStatus;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerAction;
import org.apache.hadoop.yarn.server.nodemanager.api.protocolrecords.LocalizerHeartbeatResponse;
@ -491,7 +494,7 @@ public class TestResourceLocalizationService {
Thread.sleep(1000);
dispatcher.await();
String appStr = ConverterUtils.toString(appId);
String ctnrStr = c.getContainerID().toString();
String ctnrStr = c.getContainer().getId().toString();
ArgumentCaptor<Path> tokenPathCaptor = ArgumentCaptor.forClass(Path.class);
verify(exec).startLocalizer(tokenPathCaptor.capture(),
isA(InetSocketAddress.class), eq("user0"), eq(appStr), eq(ctnrStr),
@ -567,7 +570,7 @@ public class TestResourceLocalizationService {
public boolean matches(Object o) {
ContainerEvent evt = (ContainerEvent) o;
return evt.getType() == ContainerEventType.RESOURCE_LOCALIZED
&& c.getContainerID() == evt.getContainerID();
&& c.getContainer().getId() == evt.getContainerID();
}
};
// total 2 resource localzation calls. one for each resource.
@ -756,11 +759,11 @@ public class TestResourceLocalizationService {
// Container - 1
ContainerImpl container1 = createMockContainer(user, 1);
String localizerId1 = container1.getContainerID().toString();
String localizerId1 = container1.getContainer().getId().toString();
rls.getPrivateLocalizers().put(
localizerId1,
rls.new LocalizerRunner(new LocalizerContext(user, container1
.getContainerID(), null), localizerId1));
.getContainer().getId(), null), localizerId1));
LocalizerRunner localizerRunner1 = rls.getLocalizerRunner(localizerId1);
dispatcher1.getEventHandler().handle(
@ -771,11 +774,11 @@ public class TestResourceLocalizationService {
// Container - 2 now makes the request.
ContainerImpl container2 = createMockContainer(user, 2);
String localizerId2 = container2.getContainerID().toString();
String localizerId2 = container2.getContainer().getId().toString();
rls.getPrivateLocalizers().put(
localizerId2,
rls.new LocalizerRunner(new LocalizerContext(user, container2
.getContainerID(), null), localizerId2));
.getContainer().getId(), null), localizerId2));
LocalizerRunner localizerRunner2 = rls.getLocalizerRunner(localizerId2);
dispatcher1.getEventHandler().handle(
createContainerLocalizationEvent(container2,
@ -848,6 +851,163 @@ public class TestResourceLocalizationService {
}
}
@Test(timeout = 10000)
@SuppressWarnings("unchecked")
public void testLocalResourcePath() throws Exception {
// test the local path where application and user cache files will be
// localized.
DrainDispatcher dispatcher1 = null;
try {
dispatcher1 = new DrainDispatcher();
String user = "testuser";
ApplicationId appId = BuilderUtils.newApplicationId(1, 1);
// mocked Resource Localization Service
Configuration conf = new Configuration();
AbstractFileSystem spylfs =
spy(FileContext.getLocalFSFileContext().getDefaultFileSystem());
final FileContext lfs = FileContext.getFileContext(spylfs, conf);
// We don't want files to be created
doNothing().when(spylfs).mkdir(isA(Path.class), isA(FsPermission.class),
anyBoolean());
// creating one local directory
List<Path> localDirs = new ArrayList<Path>();
String[] sDirs = new String[1];
for (int i = 0; i < 1; ++i) {
localDirs.add(lfs.makeQualified(new Path(basedir, i + "")));
sDirs[i] = localDirs.get(i).toString();
}
conf.setStrings(YarnConfiguration.NM_LOCAL_DIRS, sDirs);
// setting log directory.
String logDir =
lfs.makeQualified(new Path(basedir, "logdir ")).toString();
conf.set(YarnConfiguration.NM_LOG_DIRS, logDir);
LocalDirsHandlerService localDirHandler = new LocalDirsHandlerService();
localDirHandler.init(conf);
// Registering event handlers
EventHandler<ApplicationEvent> applicationBus = mock(EventHandler.class);
dispatcher1.register(ApplicationEventType.class, applicationBus);
EventHandler<ContainerEvent> containerBus = mock(EventHandler.class);
dispatcher1.register(ContainerEventType.class, containerBus);
ContainerExecutor exec = mock(ContainerExecutor.class);
DeletionService delService = mock(DeletionService.class);
LocalDirsHandlerService dirsHandler = new LocalDirsHandlerService();
// initializing directory handler.
dirsHandler.init(conf);
dispatcher1.init(conf);
dispatcher1.start();
ResourceLocalizationService rls =
new ResourceLocalizationService(dispatcher1, exec, delService,
localDirHandler);
dispatcher1.register(LocalizationEventType.class, rls);
rls.init(conf);
rls.handle(createApplicationLocalizationEvent(user, appId));
// We need to pre-populate the LocalizerRunner because the
// ResourceLocalizationService would otherwise start the runners
// internally, which we definitely do not want here.
// creating new container and populating corresponding localizer runner
// Container - 1
Container container1 = createMockContainer(user, 1);
String localizerId1 = container1.getContainer().getId().toString();
rls.getPrivateLocalizers().put(
localizerId1,
rls.new LocalizerRunner(new LocalizerContext(user, container1
.getContainer().getId(), null), localizerId1));
// Creating two requests for container
// 1) Private resource
// 2) Application resource
LocalResourceRequest reqPriv =
new LocalResourceRequest(new Path("file:///tmp1"), 123L,
LocalResourceType.FILE, LocalResourceVisibility.PRIVATE, "");
List<LocalResourceRequest> privList =
new ArrayList<LocalResourceRequest>();
privList.add(reqPriv);
LocalResourceRequest reqApp =
new LocalResourceRequest(new Path("file:///tmp2"), 123L,
LocalResourceType.FILE, LocalResourceVisibility.APPLICATION, "");
List<LocalResourceRequest> appList =
new ArrayList<LocalResourceRequest>();
appList.add(reqApp);
Map<LocalResourceVisibility, Collection<LocalResourceRequest>> rsrcs =
new HashMap<LocalResourceVisibility, Collection<LocalResourceRequest>>();
rsrcs.put(LocalResourceVisibility.APPLICATION, appList);
rsrcs.put(LocalResourceVisibility.PRIVATE, privList);
dispatcher1.getEventHandler().handle(
new ContainerLocalizationRequestEvent(container1, rsrcs));
// Wait for the resource download to start. The actual download will not
// start here; the resources are only populated into the pending list.
Assert
.assertTrue(waitForPrivateDownloadToStart(rls, localizerId1, 2, 500));
// Validating user and application cache paths
String userCachePath =
StringUtils.join(Path.SEPARATOR, Arrays.asList(localDirs.get(0)
.toUri().getRawPath(), ContainerLocalizer.USERCACHE, user,
ContainerLocalizer.FILECACHE));
String userAppCachePath =
StringUtils.join(Path.SEPARATOR, Arrays.asList(localDirs.get(0)
.toUri().getRawPath(), ContainerLocalizer.USERCACHE, user,
ContainerLocalizer.APPCACHE, appId.toString(),
ContainerLocalizer.FILECACHE));
// Now the application and private resources may come in any order
// for download.
// For the user cache:
// returned destinationPath = user cache path + random number
// For the app cache:
// returned destinationPath = user app cache path + random number
int returnedResources = 0;
boolean appRsrc = false, privRsrc = false;
while (returnedResources < 2) {
LocalizerHeartbeatResponse response =
rls.heartbeat(createLocalizerStatus(localizerId1));
for (ResourceLocalizationSpec resourceSpec : response
.getResourceSpecs()) {
returnedResources++;
Path destinationDirectory =
new Path(resourceSpec.getDestinationDirectory().getFile());
if (resourceSpec.getResource().getVisibility() ==
LocalResourceVisibility.APPLICATION) {
appRsrc = true;
Assert.assertEquals(userAppCachePath, destinationDirectory
.getParent().toUri().toString());
} else if (resourceSpec.getResource().getVisibility() ==
LocalResourceVisibility.PRIVATE) {
privRsrc = true;
Assert.assertEquals(userCachePath, destinationDirectory.getParent()
.toUri().toString());
} else {
throw new Exception("Unexpected resource recevied.");
}
}
}
// We should have received both resources (application and private)
Assert.assertTrue(appRsrc && privRsrc);
} finally {
if (dispatcher1 != null) {
dispatcher1.stop();
}
}
}
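// Editorial sketch, not part of the patch: the two directory shapes asserted
// above, reconstructed as a helper from the same constants.
//   private resources: <local-dir>/usercache/<user>/filecache/<random>
//   app resources:     <local-dir>/usercache/<user>/appcache/<appId>/filecache/<random>
private static Path[] expectedCacheDirs(Path localDir, String user,
    ApplicationId appId) {
  Path userCache =
      new Path(new Path(localDir, ContainerLocalizer.USERCACHE), user);
  Path privateCache = new Path(userCache, ContainerLocalizer.FILECACHE);
  Path appFileCache = new Path(new Path(new Path(userCache,
      ContainerLocalizer.APPCACHE), appId.toString()),
      ContainerLocalizer.FILECACHE);
  return new Path[] { privateCache, appFileCache };
}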
private LocalizerStatus createLocalizerStatusForFailedResource(
String localizerId, LocalResourceRequest req) {
LocalizerStatus status = createLocalizerStatus(localizerId);
@ -1154,7 +1314,10 @@ public class TestResourceLocalizationService {
private ContainerImpl createMockContainer(String user, int containerId) {
ContainerImpl container = mock(ContainerImpl.class);
when(container.getContainerID()).thenReturn(
org.apache.hadoop.yarn.api.records.Container c =
mock(org.apache.hadoop.yarn.api.records.Container.class);
when(container.getContainer()).thenReturn(c);
when(container.getContainer().getId()).thenReturn(
BuilderUtils.newContainerId(1, 1, 1, containerId));
when(container.getUser()).thenReturn(user);
Credentials mockCredentials = mock(Credentials.class);
@ -1194,8 +1357,11 @@ public class TestResourceLocalizationService {
ApplicationAttemptId appAttemptId =
BuilderUtils.newApplicationAttemptId(appId, 1);
ContainerId cId = BuilderUtils.newContainerId(appAttemptId, id);
org.apache.hadoop.yarn.api.records.Container containerAPI =
mock(org.apache.hadoop.yarn.api.records.Container.class);
when(c.getContainer()).thenReturn(containerAPI);
when(c.getUser()).thenReturn("user0");
when(c.getContainerID()).thenReturn(cId);
when(c.getContainer().getId()).thenReturn(cId);
Credentials creds = new Credentials();
creds.addToken(new Text("tok" + id), getToken(id));
when(c.getCredentials()).thenReturn(creds);
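createMockContainer now wires two mocks together just to answer getContainer().getId(). Assuming a Mockito version that provides RETURNS_DEEP_STUBS, the same chain can be stubbed in one step; a sketch, not what the patch uses:

// Deep stubs auto-create the intermediate records.Container mock.
ContainerImpl c = mock(ContainerImpl.class, Mockito.RETURNS_DEEP_STUBS);
when(c.getContainer().getId()).thenReturn(cId);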

View File

@ -213,6 +213,9 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
cId.setApplicationAttemptId(appAttemptId);
when(mockContainer.getId()).thenReturn(cId);
when(mockContainer.getNodeId()).thenReturn(context.getNodeId());
when(mockContainer.getNodeHttpAddress()).thenReturn(
context.getNodeId().getHost() + ":12345");
containerLaunchContext.setUser(user);
URL resource_alpha =

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Cont
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerEvent;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
import org.apache.hadoop.yarn.util.BuilderUtils;
import static org.mockito.Mockito.*;
public class MockContainer implements Container {
@ -48,6 +49,7 @@ public class MockContainer implements Container {
private final Map<Path, List<String>> resource =
new HashMap<Path, List<String>>();
private RecordFactory recordFactory;
private org.apache.hadoop.yarn.api.records.Container mockContainer;
public MockContainer(ApplicationAttemptId appAttemptId,
Dispatcher dispatcher, Configuration conf, String user,
@ -62,17 +64,14 @@ public class MockContainer implements Container {
launchContext.setUser(user);
this.state = ContainerState.NEW;
mockContainer = mock(org.apache.hadoop.yarn.api.records.Container.class);
when(mockContainer.getId()).thenReturn(id);
}
public void setState(ContainerState state) {
this.state = state;
}
@Override
public ContainerId getContainerID() {
return id;
}
@Override
public String getUser() {
return user;
@ -119,8 +118,7 @@ public class MockContainer implements Container {
}
@Override
public Resource getResource() {
return null;
public org.apache.hadoop.yarn.api.records.Container getContainer() {
return this.mockContainer;
}
}

View File

@ -185,16 +185,18 @@ public class TestNMWebServicesApps extends JerseyTest {
app.getUser(), app.getAppId(), 1);
Container container2 = new MockContainer(appAttemptId, dispatcher, conf,
app.getUser(), app.getAppId(), 2);
nmContext.getContainers().put(container1.getContainerID(), container1);
nmContext.getContainers().put(container2.getContainerID(), container2);
nmContext.getContainers()
.put(container1.getContainer().getId(), container1);
nmContext.getContainers()
.put(container2.getContainer().getId(), container2);
app.getContainers().put(container1.getContainerID(), container1);
app.getContainers().put(container2.getContainerID(), container2);
app.getContainers().put(container1.getContainer().getId(), container1);
app.getContainers().put(container2.getContainer().getId(), container2);
HashMap<String, String> hash = new HashMap<String, String>();
hash.put(container1.getContainerID().toString(), container1
.getContainerID().toString());
hash.put(container2.getContainerID().toString(), container2
.getContainerID().toString());
hash.put(container1.getContainer().getId().toString(), container1
.getContainer().getId().toString());
hash.put(container2.getContainer().getId().toString(), container2
.getContainer().getId().toString());
return hash;
}

View File

@ -186,16 +186,18 @@ public class TestNMWebServicesContainers extends JerseyTest {
app.getUser(), app.getAppId(), 1);
Container container2 = new MockContainer(appAttemptId, dispatcher, conf,
app.getUser(), app.getAppId(), 2);
nmContext.getContainers().put(container1.getContainerID(), container1);
nmContext.getContainers().put(container2.getContainerID(), container2);
nmContext.getContainers()
.put(container1.getContainer().getId(), container1);
nmContext.getContainers()
.put(container2.getContainer().getId(), container2);
app.getContainers().put(container1.getContainerID(), container1);
app.getContainers().put(container2.getContainerID(), container2);
app.getContainers().put(container1.getContainer().getId(), container1);
app.getContainers().put(container2.getContainer().getId(), container2);
HashMap<String, String> hash = new HashMap<String, String>();
hash.put(container1.getContainerID().toString(), container1
.getContainerID().toString());
hash.put(container2.getContainerID().toString(), container2
.getContainerID().toString());
hash.put(container1.getContainer().getId().toString(), container1
.getContainer().getId().toString());
hash.put(container2.getContainer().getId().toString(), container2
.getContainer().getId().toString());
return hash;
}
@ -468,7 +470,7 @@ public class TestNMWebServicesContainers extends JerseyTest {
String state, String user, int exitCode, String diagnostics,
String nodeId, int totalMemoryNeededMB, String logsLink)
throws JSONException, Exception {
WebServicesTestUtils.checkStringMatch("id", cont.getContainerID()
WebServicesTestUtils.checkStringMatch("id", cont.getContainer().getId()
.toString(), id);
WebServicesTestUtils.checkStringMatch("state", cont.getContainerState()
.toString(), state);
@ -481,8 +483,9 @@ public class TestNMWebServicesContainers extends JerseyTest {
WebServicesTestUtils.checkStringMatch("nodeId", nmContext.getNodeId()
.toString(), nodeId);
assertEquals("totalMemoryNeededMB wrong", 0, totalMemoryNeededMB);
String shortLink = ujoin("containerlogs", cont.getContainerID().toString(),
cont.getUser());
String shortLink =
ujoin("containerlogs", cont.getContainer().getId().toString(),
cont.getUser());
assertTrue("containerLogsLink wrong", logsLink.contains(shortLink));
}

View File

@ -178,17 +178,7 @@ public class AMLauncher implements Runnable {
Map<String, String> environment = container.getEnvironment();
environment.put(ApplicationConstants.APPLICATION_WEB_PROXY_BASE_ENV,
application.getWebProxyBase());
// Set the AppAttemptId, containerId, NMHTTPAdress, AppSubmitTime to be
// consumable by the AM.
environment.put(ApplicationConstants.AM_CONTAINER_ID_ENV,
containerID.toString());
environment.put(ApplicationConstants.NM_HOST_ENV, masterContainer
.getNodeId().getHost());
environment.put(ApplicationConstants.NM_PORT_ENV,
String.valueOf(masterContainer.getNodeId().getPort()));
String parts[] =
masterContainer.getNodeHttpAddress().split(":");
environment.put(ApplicationConstants.NM_HTTP_PORT_ENV, parts[1]);
// Set AppSubmitTime and MaxAppAttempts to be consumable by the AM.
ApplicationId applicationId =
application.getAppAttemptId().getApplicationId();
environment.put(

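With the per-container variables removed from AMLauncher, an ApplicationMaster is left to read its identity from the environment the NodeManager itself injects, the same CONTAINER_ID, NM_HOST, NM_PORT, and NM_HTTP_PORT variables the TestContainerLaunch changes above assert. A sketch of the consuming side, assuming those variables are present and ApplicationConstants.Environment plus ConverterUtils are imported:

String cidStr = System.getenv(Environment.CONTAINER_ID.name());
ContainerId containerId = ConverterUtils.toContainerId(cidStr);
ApplicationAttemptId attemptId = containerId.getApplicationAttemptId();
String nmHost = System.getenv(Environment.NM_HOST.name());
int nmPort = Integer.parseInt(System.getenv(Environment.NM_PORT.name()));
int nmHttpPort =
    Integer.parseInt(System.getenv(Environment.NM_HTTP_PORT.name()));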
View File

@ -48,6 +48,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.service.AbstractService;
import com.google.common.annotations.VisibleForTesting;
/**
* Service to renew application delegation tokens.
*/
@ -139,7 +141,8 @@ public class DelegationTokenRenewer extends AbstractService {
* class that is used for keeping tracks of DT to renew
*
*/
private static class DelegationTokenToRenew {
@VisibleForTesting
protected static class DelegationTokenToRenew {
public final Token<?> token;
public final ApplicationId applicationId;
public final Configuration conf;
@ -252,7 +255,16 @@ public class DelegationTokenRenewer extends AbstractService {
private void addTokenToList(DelegationTokenToRenew t) {
delegationTokens.add(t);
}
@VisibleForTesting
public Set<Token<?>> getDelegationTokens() {
Set<Token<?>> tokens = new HashSet<Token<?>>();
for(DelegationTokenToRenew delegationToken : delegationTokens) {
tokens.add(delegationToken.token);
}
return tokens;
}
/**
* Add application tokens for renewal.
* @param applicationId added application
@ -343,7 +355,8 @@ public class DelegationTokenRenewer extends AbstractService {
/**
* set task to renew the token
*/
private void setTimerForTokenRenewal(DelegationTokenToRenew token)
@VisibleForTesting
protected void setTimerForTokenRenewal(DelegationTokenToRenew token)
throws IOException {
// calculate timer time
@ -358,7 +371,8 @@ public class DelegationTokenRenewer extends AbstractService {
}
// renew a token
private void renewToken(final DelegationTokenToRenew dttr)
@VisibleForTesting
protected void renewToken(final DelegationTokenToRenew dttr)
throws IOException {
// need to use doAs so that http can find the kerberos tgt
// NOTE: token renewers should be responsible for the correct UGI!

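A minimal sketch of what the @VisibleForTesting surface added above enables: a test can now assert that a submitted application's tokens were registered for renewal. Here rmContext and expectedToken are assumed test fixtures, and Set, Token, and Assert are assumed imports:

DelegationTokenRenewer renewer = rmContext.getDelegationTokenRenewer();
Set<Token<?>> registered = renewer.getDelegationTokens();
Assert.assertTrue("token not registered for renewal",
    registered.contains(expectedToken));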
View File

@ -18,12 +18,15 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import java.nio.ByteBuffer;
import java.security.PrivilegedAction;
import java.util.Map;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.ClientRMProtocol;
import org.apache.hadoop.yarn.api.protocolrecords.GetNewApplicationRequest;
@ -41,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.server.RMDelegationTokenSecretManager;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent;
import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.ApplicationMasterLauncher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
@ -130,26 +134,26 @@ public class MockRM extends ResourceManager {
public RMApp submitApp(int masterMemory, String name, String user) throws Exception {
return submitApp(masterMemory, name, user, null, false, null,
super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null);
}
public RMApp submitApp(int masterMemory, String name, String user,
Map<ApplicationAccessType, String> acls) throws Exception {
return submitApp(masterMemory, name, user, acls, false, null,
super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null);
}
public RMApp submitApp(int masterMemory, String name, String user,
Map<ApplicationAccessType, String> acls, String queue) throws Exception {
return submitApp(masterMemory, name, user, acls, false, queue,
super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null);
}
public RMApp submitApp(int masterMemory, String name, String user,
Map<ApplicationAccessType, String> acls, boolean unmanaged, String queue,
int maxAppAttempts) throws Exception {
int maxAppAttempts, Credentials ts) throws Exception {
ClientRMProtocol client = getClientRMService();
GetNewApplicationResponse resp = client.getNewApplication(Records
.newRecord(GetNewApplicationRequest.class));
@ -175,6 +179,12 @@ public class MockRM extends ResourceManager {
sub.setResource(capability);
clc.setApplicationACLs(acls);
clc.setUser(user);
if (ts != null && UserGroupInformation.isSecurityEnabled()) {
DataOutputBuffer dob = new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
clc.setContainerTokens(securityTokens);
}
sub.setAMContainerSpec(clc);
req.setApplicationSubmissionContext(sub);
UserGroupInformation fakeUser =
@ -357,6 +367,10 @@ public class MockRM extends ResourceManager {
return this.nodesListManager;
}
public RMDelegationTokenSecretManager getRMDTSecretManager() {
return this.rmDTSecretManager;
}
@Override
protected void startWepApp() {
// override to disable webapp

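The new branch in submitApp serializes the caller's Credentials into the launch context via DataOutputBuffer. The inverse operation, useful when a test wants to confirm what was stored, could look like this; a sketch, where clc is the launch context from the code above, getContainerTokens() is the accessor the restart test later in this patch asserts against, and dib/restored are new names:

// Round-trip the credentials back out of the ByteBuffer (throws IOException).
ByteBuffer securityTokens = clc.getContainerTokens();
DataInputBuffer dib = new DataInputBuffer();
dib.reset(securityTokens.array(), securityTokens.limit());
Credentials restored = new Credentials();
restored.readTokenStorageStream(dib); // DataInputBuffer is a DataInputStream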
View File

@ -38,7 +38,6 @@ import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
@ -71,17 +70,17 @@ public class TestApplicationMasterLauncher {
launched = true;
Map<String, String> env =
request.getContainerLaunchContext().getEnvironment();
containerIdAtContainerManager =
env.get(ApplicationConstants.AM_CONTAINER_ID_ENV);
ContainerId containerId =
ConverterUtils.toContainerId(containerIdAtContainerManager);
request.getContainer().getId();
containerIdAtContainerManager = containerId.toString();
attemptIdAtContainerManager =
containerId.getApplicationAttemptId().toString();
nmHostAtContainerManager = env.get(ApplicationConstants.NM_HOST_ENV);
nmHostAtContainerManager = request.getContainer().getNodeId().getHost();
nmPortAtContainerManager =
Integer.parseInt(env.get(ApplicationConstants.NM_PORT_ENV));
request.getContainer().getNodeId().getPort();
nmHttpPortAtContainerManager =
Integer.parseInt(env.get(ApplicationConstants.NM_HTTP_PORT_ENV));
Integer.parseInt(request.getContainer().getNodeHttpAddress()
.split(":")[1]);
submitTimeAtContainerManager =
Long.parseLong(env.get(ApplicationConstants.APP_SUBMIT_TIME_ENV));
maxAppAttempts =

View File

@ -18,11 +18,21 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ExitUtil;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
@ -33,9 +43,11 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatResponse;
import org.apache.hadoop.yarn.server.api.records.NodeAction;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;
@ -43,6 +55,8 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
@ -60,10 +74,8 @@ public class TestRMRestart {
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE,
"org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore");
conf.set(YarnConfiguration.RM_SCHEDULER,
"org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair.FairScheduler");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
@ -159,7 +171,7 @@ public class TestRMRestart {
// create unmanaged app
RMApp appUnmanaged = rm1.submitApp(200, "someApp", "someUser", null, true,
null, conf.getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS));
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null);
ApplicationAttemptId unmanagedAttemptId =
appUnmanaged.getCurrentAppAttempt().getAppAttemptId();
// assert appUnmanaged info is saved
@ -321,8 +333,7 @@ public class TestRMRestart {
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE,
"org.apache.hadoop.yarn.server.resourcemanager.recovery.MemoryRMStateStore");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
@ -340,10 +351,12 @@ public class TestRMRestart {
// submit an app with maxAppAttempts equals to 1
RMApp app1 = rm1.submitApp(200, "name", "user",
new HashMap<ApplicationAccessType, String>(), false, "default", 1);
new HashMap<ApplicationAccessType, String>(), false, "default", 1,
null);
// submit an app with maxAppAttempts equals to -1
RMApp app2 = rm1.submitApp(200, "name", "user",
new HashMap<ApplicationAccessType, String>(), false, "default", -1);
new HashMap<ApplicationAccessType, String>(), false, "default", -1,
null);
// assert app1 info is saved
ApplicationState appState = rmAppState.get(app1.getApplicationId());
@ -389,4 +402,113 @@ public class TestRMRestart {
rm1.stop();
rm2.stop();
}
@Test
public void testTokenRestoredOnRMrestart() throws Exception {
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
ExitUtil.disableSystemExit();
YarnConfiguration conf = new YarnConfiguration();
conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, 2);
conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION,
"kerberos");
UserGroupInformation.setConfiguration(conf);
MemoryRMStateStore memStore = new MemoryRMStateStore();
memStore.init(conf);
RMState rmState = memStore.getState();
Map<ApplicationId, ApplicationState> rmAppState =
rmState.getApplicationState();
MockRM rm1 = new MyMockRM(conf, memStore);
rm1.start();
HashSet<Token<RMDelegationTokenIdentifier>> tokenSet =
new HashSet<Token<RMDelegationTokenIdentifier>>();
// create an empty credential
Credentials ts = new Credentials();
// create tokens and add into credential
Text userText1 = new Text("user1");
RMDelegationTokenIdentifier dtId1 =
new RMDelegationTokenIdentifier(userText1, new Text("renewer1"),
userText1);
Token<RMDelegationTokenIdentifier> token1 =
new Token<RMDelegationTokenIdentifier>(dtId1,
rm1.getRMDTSecretManager());
ts.addToken(userText1, token1);
tokenSet.add(token1);
Text userText2 = new Text("user2");
RMDelegationTokenIdentifier dtId2 =
new RMDelegationTokenIdentifier(userText2, new Text("renewer2"),
userText2);
Token<RMDelegationTokenIdentifier> token2 =
new Token<RMDelegationTokenIdentifier>(dtId2,
rm1.getRMDTSecretManager());
ts.addToken(userText2, token2);
tokenSet.add(token2);
// submit an app with custom credentials
RMApp app = rm1.submitApp(200, "name", "user",
new HashMap<ApplicationAccessType, String>(), false, "default", 1, ts);
// assert app info is saved
ApplicationState appState = rmAppState.get(app.getApplicationId());
Assert.assertNotNull(appState);
// assert delegation tokens are saved
DataOutputBuffer dob = new DataOutputBuffer();
ts.writeTokenStorageToStream(dob);
ByteBuffer securityTokens =
ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
Assert.assertEquals(securityTokens, appState
.getApplicationSubmissionContext().getAMContainerSpec()
.getContainerTokens());
// start new RM
MockRM rm2 = new MyMockRM(conf, memStore);
rm2.start();
// verify tokens are properly populated back to rm2's DelegationTokenRenewer
Assert.assertEquals(tokenSet, rm2.getRMContext()
.getDelegationTokenRenewer().getDelegationTokens());
// stop the RM
rm1.stop();
rm2.stop();
}
class MyMockRM extends MockRM {
public MyMockRM(Configuration conf, RMStateStore store) {
super(conf, store);
}
@Override
protected void doSecureLogin() throws IOException {
// Do nothing.
}
@Override
protected DelegationTokenRenewer createDelegationTokenRenewer() {
return new DelegationTokenRenewer() {
@Override
protected void renewToken(final DelegationTokenToRenew dttr)
throws IOException {
// Do nothing
}
@Override
protected void setTimerForTokenRenewal(DelegationTokenToRenew token)
throws IOException {
// Do nothing
}
};
}
}
}