diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 48d438e8f61..c00aed6bd95 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -256,6 +256,10 @@ Release 0.23.0 - Unreleased MAPREDUCE-2675. Reformat JobHistory Server main page to be more useful. (Robert Joseph Evans via vinodkv). + MAPREDUCE-2896. Simplify all apis to in + org.apache.hadoop.yarn.api.records.* to be get/set only. Added javadocs to + all public records. (acmurthy) + OPTIMIZATIONS MAPREDUCE-2026. Make JobTracker.getJobCounters() and diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java index ac921eb8b2f..c1feb7d77d3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java @@ -137,6 +137,7 @@ public abstract class TaskAttemptImpl implements protected final Configuration conf; protected final Path jobFile; protected final int partition; + @SuppressWarnings("rawtypes") protected final EventHandler eventHandler; private final TaskAttemptId attemptId; private final Clock clock; @@ -431,7 +432,8 @@ public abstract class TaskAttemptImpl implements //this is the last status reported by the REMOTE running attempt private TaskAttemptStatus reportedStatus; - public TaskAttemptImpl(TaskId taskId, int i, EventHandler eventHandler, + public TaskAttemptImpl(TaskId taskId, int i, + @SuppressWarnings("rawtypes") EventHandler eventHandler, TaskAttemptListener taskAttemptListener, Path jobFile, int partition, Configuration conf, String[] dataLocalHosts, OutputCommitter committer, Token jobToken, @@ -527,6 +529,13 @@ public abstract class TaskAttemptImpl implements ContainerLaunchContext container = recordFactory.newRecordInstance(ContainerLaunchContext.class); + // Application resources + Map localResources = + new HashMap(); + + // Application environment + Map environment = new HashMap(); + try { FileSystem remoteFS = FileSystem.get(conf); @@ -535,7 +544,7 @@ public abstract class TaskAttemptImpl implements Path remoteJobJar = (new Path(remoteTask.getConf().get( MRJobConfig.JAR))).makeQualified(remoteFS.getUri(), remoteFS.getWorkingDirectory()); - container.setLocalResource( + localResources.put( MRConstants.JOB_JAR, createLocalResource(remoteFS, recordFactory, remoteJobJar, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION)); @@ -557,7 +566,7 @@ public abstract class TaskAttemptImpl implements new Path(path, oldJobId.toString()); Path remoteJobConfPath = new Path(remoteJobSubmitDir, MRConstants.JOB_CONF_FILE); - container.setLocalResource( + localResources.put( MRConstants.JOB_CONF_FILE, createLocalResource(remoteFS, recordFactory, remoteJobConfPath, LocalResourceType.FILE, LocalResourceVisibility.APPLICATION)); @@ -565,9 +574,14 @@ public abstract class TaskAttemptImpl implements + remoteJobConfPath.toUri().toASCIIString()); // //////////// End of JobConf setup + // Setup DistributedCache - setupDistributedCache(remoteFS, conf, container); + setupDistributedCache(remoteFS, conf, localResources, environment); 
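Not part of the patch: the net effect of the ContainerLaunchContext change in this hunk (and in the interface further below) is that callers now hand over whole collections instead of adding entries one by one. A minimal sketch of the new usage, assuming the Records helper used elsewhere in this patch; the LaunchContextSketch wrapper is hypothetical.

```java
import java.nio.ByteBuffer;
import java.util.List;
import java.util.Map;

import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.util.Records;

public class LaunchContextSketch {
  // Builds a launch context with the new whole-map/whole-list setters that
  // replace the per-key setLocalResource/setEnv/addCommand style methods.
  public static ContainerLaunchContext newContext(
      Map<String, LocalResource> localResources,
      Map<String, String> environment,
      List<String> commands,
      Map<String, ByteBuffer> serviceData) {
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setLocalResources(localResources);
    ctx.setEnv(environment);
    ctx.setCommands(commands);
    ctx.setServiceData(serviceData);
    return ctx;
  }
}
```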
+ // Set local-resources and environment + container.setLocalResources(localResources); + container.setEnv(environment); + // Setup up tokens Credentials taskCredentials = new Credentials(); @@ -594,12 +608,12 @@ public abstract class TaskAttemptImpl implements // Add shuffle token LOG.info("Putting shuffle token in serviceData"); - container - .setServiceData( - ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, - ShuffleHandler.serializeServiceData(jobToken)); + Map serviceData = new HashMap(); + serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID, + ShuffleHandler.serializeServiceData(jobToken)); + container.setServiceData(serviceData); - MRApps.addToClassPath(container.getAllEnv(), getInitialClasspath()); + MRApps.addToClassPath(container.getEnv(), getInitialClasspath()); } catch (IOException e) { throw new YarnException(e); } @@ -622,11 +636,11 @@ public abstract class TaskAttemptImpl implements classPaths.add(workDir.toString()); // TODO // Construct the actual Container - container.addAllCommands(MapReduceChildJVM.getVMCommand( + container.setCommands(MapReduceChildJVM.getVMCommand( taskAttemptListener.getAddress(), remoteTask, javaHome, workDir.toString(), containerLogDir, childTmpDir, jvmID)); - MapReduceChildJVM.setVMEnv(container.getAllEnv(), classPaths, + MapReduceChildJVM.setVMEnv(container.getEnv(), classPaths, workDir.toString(), containerLogDir, nmLdLibraryPath, remoteTask, localizedApplicationTokensFile); @@ -648,11 +662,15 @@ public abstract class TaskAttemptImpl implements return result; } - private void setupDistributedCache(FileSystem remoteFS, Configuration conf, - ContainerLaunchContext container) throws IOException { + private void setupDistributedCache(FileSystem remoteFS, + Configuration conf, + Map localResources, + Map env) + throws IOException { // Cache archives - parseDistributedCacheArtifacts(remoteFS, container, LocalResourceType.ARCHIVE, + parseDistributedCacheArtifacts(remoteFS, localResources, env, + LocalResourceType.ARCHIVE, DistributedCache.getCacheArchives(conf), parseTimeStamps(DistributedCache.getArchiveTimestamps(conf)), getFileSizes(conf, MRJobConfig.CACHE_ARCHIVES_SIZES), @@ -660,7 +678,9 @@ public abstract class TaskAttemptImpl implements DistributedCache.getArchiveClassPaths(conf)); // Cache files - parseDistributedCacheArtifacts(remoteFS, container, LocalResourceType.FILE, + parseDistributedCacheArtifacts(remoteFS, + localResources, env, + LocalResourceType.FILE, DistributedCache.getCacheFiles(conf), parseTimeStamps(DistributedCache.getFileTimestamps(conf)), getFileSizes(conf, MRJobConfig.CACHE_FILES_SIZES), @@ -672,7 +692,10 @@ public abstract class TaskAttemptImpl implements // Use TaskDistributedCacheManager.CacheFiles.makeCacheFiles(URI[], // long[], boolean[], Path[], FileType) private void parseDistributedCacheArtifacts( - FileSystem remoteFS, ContainerLaunchContext container, LocalResourceType type, + FileSystem remoteFS, + Map localResources, + Map env, + LocalResourceType type, URI[] uris, long[] timestamps, long[] sizes, boolean visibilities[], Path[] pathsToPutOnClasspath) throws IOException { @@ -709,7 +732,7 @@ public abstract class TaskAttemptImpl implements throw new IllegalArgumentException("Resource name must be relative"); } String linkName = name.toUri().getPath(); - container.setLocalResource( + localResources.put( linkName, BuilderUtils.newLocalResource( p.toUri(), type, @@ -719,8 +742,7 @@ public abstract class TaskAttemptImpl implements sizes[i], timestamps[i]) ); if (classPaths.containsKey(u.getPath())) { - Map 
environment = container.getAllEnv(); - MRApps.addToClassPath(environment, linkName); + MRApps.addToClassPath(env, linkName); } } } @@ -892,6 +914,7 @@ public abstract class TaskAttemptImpl implements } } + @SuppressWarnings("unchecked") @Override public void handle(TaskAttemptEvent event) { LOG.info("Processing " + event.getTaskAttemptID() + @@ -1034,6 +1057,7 @@ public abstract class TaskAttemptImpl implements public RequestContainerTransition(boolean rescheduled) { this.rescheduled = rescheduled; } + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1062,6 +1086,7 @@ public abstract class TaskAttemptImpl implements private static class ContainerAssignedTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(final TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1111,6 +1136,7 @@ public abstract class TaskAttemptImpl implements this.finalState = finalState; this.withdrawsContainerRequest = withdrawsContainerRequest; } + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1157,6 +1183,7 @@ public abstract class TaskAttemptImpl implements private static class LaunchedContainerTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent evnt) { @@ -1207,6 +1234,7 @@ public abstract class TaskAttemptImpl implements private static class CommitPendingTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1218,6 +1246,7 @@ public abstract class TaskAttemptImpl implements private static class TaskCleanupTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1233,6 +1262,7 @@ public abstract class TaskAttemptImpl implements private static class SucceededTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1262,6 +1292,7 @@ public abstract class TaskAttemptImpl implements private static class FailedTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { // set the finish time @@ -1286,6 +1317,7 @@ public abstract class TaskAttemptImpl implements } } + @SuppressWarnings({ "unchecked" }) private void logAttemptFinishedEvent(TaskAttemptState state) { //Log finished events only if an attempt started. 
if (getLaunchTime() == 0) return; @@ -1319,6 +1351,7 @@ public abstract class TaskAttemptImpl implements private static class TooManyFetchFailureTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { //add to diagnostic @@ -1346,6 +1379,7 @@ public abstract class TaskAttemptImpl implements private static class KilledTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1372,6 +1406,7 @@ public abstract class TaskAttemptImpl implements private static class CleanupContainerTransition implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { @@ -1398,6 +1433,7 @@ public abstract class TaskAttemptImpl implements private static class StatusUpdater implements SingleArcTransition { + @SuppressWarnings("unchecked") @Override public void transition(TaskAttemptImpl taskAttempt, TaskAttemptEvent event) { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java index dfe9b8e02cc..18a0f2d5a6a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java @@ -70,7 +70,7 @@ public class LocalContainerAllocator extends RMCommunicator if (event.getType() == ContainerAllocator.EventType.CONTAINER_REQ) { LOG.info("Processing the event " + event.toString()); ContainerId cID = recordFactory.newRecordInstance(ContainerId.class); - cID.setAppId(appID); + cID.setApplicationAttemptId(applicationAttemptId); // use negative ids to denote that these are local. Need a better way ?? 
cID.setId((-1) * containerCount.getAndIncrement()); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java index 07a0cca16d1..499831c5f87 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java @@ -63,7 +63,6 @@ import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner; import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent; import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.apache.hadoop.security.Credentials; -import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.Clock; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; @@ -324,7 +323,7 @@ public class MRApp extends MRAppMaster { @Override public void handle(ContainerAllocatorEvent event) { ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(getContext().getApplicationID()); + cId.setApplicationAttemptId(getContext().getApplicationAttemptId()); cId.setId(containerCount++); Container container = recordFactory.newRecordInstance(Container.class); container.setId(cId); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java index 74bb1a89bf2..3615c27b152 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java @@ -30,6 +30,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator; import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent; import org.apache.hadoop.yarn.YarnException; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.NodeId; @@ -124,12 +125,15 @@ public class MRAppBenchmark { try { if (concurrentRunningTasks < maxConcurrentRunningTasks) { event = eventQueue.take(); - ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(getContext().getApplicationID()); + ContainerId cId = + recordFactory.newRecordInstance(ContainerId.class); + cId.setApplicationAttemptId( + getContext().getApplicationAttemptId()); cId.setId(containerCount++); //System.out.println("Allocating " + containerCount); - Container container = recordFactory.newRecordInstance(Container.class); + Container container = + recordFactory.newRecordInstance(Container.class); container.setId(cId); NodeId nodeId = recordFactory.newRecordInstance(NodeId.class); nodeId.setHost("dummy"); diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index ce160b8f133..9f693860348 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -51,6 +51,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.yarn.MockApps; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.util.Records; @@ -235,7 +236,11 @@ public class MockJobs extends MockApps { @Override public ContainerId getAssignedContainerID() { ContainerId id = Records.newRecord(ContainerId.class); - id.setAppId(taid.getTaskId().getJobId().getAppId()); + ApplicationAttemptId appAttemptId = + Records.newRecord(ApplicationAttemptId.class); + appAttemptId.setApplicationId(taid.getTaskId().getJobId().getAppId()); + appAttemptId.setAttemptId(0); + id.setApplicationAttemptId(appAttemptId); return id; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java index 5f303440d04..67d676ce8a9 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedTaskAttempt.java @@ -29,6 +29,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState; import org.apache.hadoop.mapreduce.v2.api.records.TaskId; import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; @@ -82,12 +83,23 @@ public class CompletedTaskAttempt implements TaskAttempt { @Override public ContainerId getAssignedContainerID() { - //TODO ContainerId needs to be part of some historyEvent to be able to render the log directory. - ContainerId containerId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ContainerId.class); + //TODO ContainerId needs to be part of some historyEvent to be able to + //render the log directory. 
+ ContainerId containerId = + RecordFactoryProvider.getRecordFactory(null).newRecordInstance( + ContainerId.class); containerId.setId(-1); - containerId.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class)); - containerId.getAppId().setId(-1); - containerId.getAppId().setClusterTimestamp(-1); + ApplicationAttemptId applicationAttemptId = + RecordFactoryProvider.getRecordFactory(null).newRecordInstance( + ApplicationAttemptId.class); + applicationAttemptId.setAttemptId(-1); + ApplicationId applicationId = + RecordFactoryProvider.getRecordFactory(null).newRecordInstance( + ApplicationId.class); + applicationId.setClusterTimestamp(-1); + applicationId.setId(-1); + applicationAttemptId.setApplicationId(applicationId); + containerId.setApplicationAttemptId(applicationAttemptId); return containerId; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index d3cab19822e..fa167a0acf1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -348,7 +348,6 @@ public class YARNRunner implements ClientProtocol { // Add { job jar, MR app jar } to classpath. Map environment = new HashMap(); -// appContext.environment = new HashMap(); MRApps.setInitialClasspath(environment); MRApps.addToClassPath(environment, MRConstants.JOB_JAR); MRApps.addToClassPath(environment, diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java index d9414101560..e009d2eb17d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/AMResponse.java @@ -119,11 +119,7 @@ public interface AMResponse { @Stable public List getCompletedContainersStatuses(); - /** - * Set the list of list of completed containers' statuses. 
- * @param containers list of completed containers' statuses - */ - @Public - @Stable + @Private + @Unstable public void setCompletedContainersStatuses(List containers); } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java index 60f007fedbb..854afa1a71f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java @@ -20,12 +20,44 @@ package org.apache.hadoop.yarn.api.records; import java.text.NumberFormat; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + *

ApplicationAttemptId denotes the particular attempt
+ * of an ApplicationMaster for a given {@link ApplicationId}.
+ *
+ * Multiple attempts might be needed to run an application to completion due
+ * to temporal failures of the ApplicationMaster such as hardware
+ * failures, connectivity issues etc. on the node on which it was scheduled.
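Since the setters below are being marked @Private/@Unstable, applications only read these ids; framework and test code builds them via the record factory. A minimal sketch mirroring the MockJobs change later in this patch; the wrapper class and method are illustrative only.

```java
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.util.Records;

public class AttemptIdSketch {
  // ApplicationId = RM start-time (cluster timestamp) + a monotonically
  // increasing counter; the attempt id pairs that with an attempt number.
  public static ApplicationAttemptId firstAttempt(long clusterTimestamp, int appNumber) {
    ApplicationId appId = Records.newRecord(ApplicationId.class);
    appId.setClusterTimestamp(clusterTimestamp);
    appId.setId(appNumber);

    ApplicationAttemptId attemptId = Records.newRecord(ApplicationAttemptId.class);
    attemptId.setApplicationId(appId);
    attemptId.setAttemptId(1);
    return attemptId;
  }
}
```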

+ */ +@Public +@Stable public abstract class ApplicationAttemptId implements Comparable { + + /** + * Get the ApplicationId of the ApplicationAttempId. + * @return ApplicationId of the ApplicationAttempId + */ + @Public + @Stable public abstract ApplicationId getApplicationId(); + + @Private + @Unstable + public abstract void setApplicationId(ApplicationId appID); + + /** + * Get the attempt id of the Application. + * @return attempt id of the Application + */ public abstract int getAttemptId(); - public abstract void setApplicationId(ApplicationId appID); + @Private + @Unstable public abstract void setAttemptId(int attemptId); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java index af860180352..f98bf99c87f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java @@ -18,11 +18,47 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + *

ApplicationId represents the globally unique
+ * identifier for an application.
+ *
+ * The globally unique nature of the identifier is achieved by using the
+ * cluster timestamp i.e. start-time of the
+ * ResourceManager along with a monotonically increasing counter
+ * for the application.

+ */ +@Public +@Stable public abstract class ApplicationId implements Comparable { + + /** + * Get the short integer identifier of the ApplicationId + * which is unique for all applications started by a particular instance + * of the ResourceManager. + * @return short integer identifier of the ApplicationId + */ + @Public + @Stable public abstract int getId(); + + @Private + @Unstable + public abstract void setId(int id); + + /** + * Get the start time of the ResourceManager which is + * used to generate globally unique ApplicationId. + * @return start time of the ResourceManager + */ public abstract long getClusterTimestamp(); - public abstract void setId(int id); + @Private + @Unstable public abstract void setClusterTimestamp(long clusterTimestamp); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java index 725820ccc2b..3137009f48c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java @@ -18,26 +18,43 @@ package org.apache.hadoop.yarn.api.records; -//TODO: Split separate object for register, deregister and in-RM use. +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + * For internal use only... + */ +@Private +@Unstable public interface ApplicationMaster { ApplicationId getApplicationId(); - String getHost(); - int getRpcPort(); - String getTrackingUrl(); - ApplicationStatus getStatus(); - ApplicationState getState(); - String getClientToken(); - int getAMFailCount(); - int getContainerCount(); - String getDiagnostics(); void setApplicationId(ApplicationId appId); + + String getHost(); void setHost(String host); + + int getRpcPort(); void setRpcPort(int rpcPort); + + String getTrackingUrl(); void setTrackingUrl(String url); + + ApplicationStatus getStatus(); void setStatus(ApplicationStatus status); + + ApplicationState getState(); void setState(ApplicationState state); + + String getClientToken(); void setClientToken(String clientToken); + + int getAMFailCount(); void setAMFailCount(int amFailCount); + + int getContainerCount(); void setContainerCount(int containerCount); + + String getDiagnostics(); void setDiagnostics(String diagnostics); } + diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationState.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationState.java index 9faafd93cb1..6fcdea5cac9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationState.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationState.java @@ -18,6 +18,30 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; + +/** + * Ennumeration of various states of an Application. 
+ */ +@Public +@Stable public enum ApplicationState { - NEW, SUBMITTED, RUNNING, SUCCEEDED, FAILED, KILLED + /** Application which was just created. */ + NEW, + + /** Application which has been submitted. */ + SUBMITTED, + + /** Application which is currently running. */ + RUNNING, + + /** Application which completed successfully. */ + SUCCEEDED, + + /** Application which failed. */ + FAILED, + + /** Application which was terminated by a user or admin. */ + KILLED } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationStatus.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationStatus.java index 6748c5369a2..a71a1652fe0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationStatus.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationStatus.java @@ -18,12 +18,21 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + * For internal use only... + */ +@Private +@Unstable public interface ApplicationStatus { ApplicationAttemptId getApplicationAttemptId(); - int getResponseId(); - float getProgress(); - void setApplicationAttemptId(ApplicationAttemptId applicationAttemptId); + + int getResponseId(); void setResponseId(int id); + + float getProgress(); void setProgress(float progress); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java index 523400b0ea5..97c84e4d10a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Container.java @@ -18,21 +18,133 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.yarn.api.AMRMProtocol; +import org.apache.hadoop.yarn.api.ContainerManager; +/** + *

Container represents an allocated resource in the cluster.
+ *
+ * The ResourceManager is the sole authority to allocate any
+ * Container to applications. The allocated Container
+ * is always on a single node and has a unique {@link ContainerId}. It has
+ * a specific amount of {@link Resource} allocated.
+ *
+ * It includes details such as:
+ * <ul>
+ *   <li>{@link ContainerId} for the container, which is globally unique.</li>
+ *   <li>{@link NodeId} of the node on which the container is allocated.</li>
+ *   <li>HTTP uri of the node.</li>
+ *   <li>{@link Resource} allocated to the container.</li>
+ *   <li>{@link ContainerState} of the container.</li>
+ *   <li>{@link ContainerToken} of the container, used to securely verify
+ *       authenticity of the allocation.</li>
+ *   <li>{@link ContainerStatus} of the container.</li>
+ * </ul>
+ *
+ * Typically, an ApplicationMaster receives the
+ * Container from the ResourceManager during
+ * resource-negotiation and then talks to the NodeManager to
+ * start/stop containers.
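A small illustration (not part of the patch) of the read-only view an ApplicationMaster gets through the @Public getters on this interface; the helper class is hypothetical.

```java
import org.apache.hadoop.yarn.api.records.Container;

public class ContainerInfoSketch {
  // Summarizes an allocated container using only the public getters.
  public static String describe(Container container) {
    return container.getId()
        + " on " + container.getNodeId().getHost() + ":" + container.getNodeId().getPort()
        + " (http " + container.getNodeHttpAddress() + ")"
        + ", memory=" + container.getResource().getMemory()
        + ", state=" + container.getState();
  }
}
```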

+ * + * @see AMRMProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + * @see ContainerManager#stopContainer(org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest) + */ +@Public +@Stable public interface Container extends Comparable { + /** + * Get the globally unique identifier for the container. + * @return globally unique identifier for the container + */ + @Public + @Stable ContainerId getId(); + + @Private + @Unstable + void setId(ContainerId id); + + /** + * Get the identifier of the node on which the container is allocated. + * @return identifier of the node on which the container is allocated + */ + @Public + @Stable NodeId getNodeId(); + + @Private + @Unstable + void setNodeId(NodeId nodeId); + + /** + * Get the http uri of the node on which the container is allocated. + * @return http uri of the node on which the container is allocated + */ + @Public + @Stable String getNodeHttpAddress(); + + @Private + @Unstable + void setNodeHttpAddress(String nodeHttpAddress); + + /** + * Get the Resource allocated to the container. + * @return Resource allocated to the container + */ + @Public + @Stable Resource getResource(); + + @Private + @Unstable + void setResource(Resource resource); + + /** + * Get the current ContainerState of the container. + * @return current ContainerState of the container + */ + @Public + @Stable ContainerState getState(); + + @Private + @Unstable + void setState(ContainerState state); + + /** + * Get the ContainerToken for the container. + * @return ContainerToken for the container + */ + @Public + @Stable ContainerToken getContainerToken(); + + @Private + @Unstable + void setContainerToken(ContainerToken containerToken); + + /** + * Get the ContainerStatus of the container. + * @return ContainerStatus of the container + */ + @Public + @Stable ContainerStatus getContainerStatus(); - void setId(ContainerId id); - void setNodeId(NodeId nodeId); - void setNodeHttpAddress(String nodeHttpAddress); - void setResource(Resource resource); - void setState(ContainerState state); - void setContainerToken(ContainerToken containerToken); + @Private + @Unstable void setContainerStatus(ContainerStatus containerStatus); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java index 17e5f00c9c2..7e43f164ddb 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerId.java @@ -20,13 +20,42 @@ package org.apache.hadoop.yarn.api.records; import java.text.NumberFormat; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + *

ContainerId represents a globally unique identifier
+ * for a {@link Container} in the cluster.
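A minimal sketch of how container ids are now minted against an attempt rather than an application, mirroring the MRApp and LocalContainerAllocator changes earlier in this patch; the wrapper method itself is illustrative.

```java
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.Records;

public class ContainerIdSketch {
  // setAppId(ApplicationId) is gone; a ContainerId now hangs off the attempt.
  public static ContainerId newContainerId(ApplicationAttemptId attemptId, int containerNumber) {
    ContainerId cId = Records.newRecord(ContainerId.class);
    cId.setApplicationAttemptId(attemptId);
    cId.setId(containerNumber);
    return cId;
  }
}
```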

+ */ +@Public +@Stable public abstract class ContainerId implements Comparable{ - public abstract ApplicationAttemptId getAppAttemptId(); - public abstract ApplicationId getAppId(); - public abstract int getId(); + /** + * Get the ApplicationAttemptId of the application to which + * the Container was assigned. + * @return ApplicationAttemptId of the application to which + * the Container was assigned + */ + @Public + @Stable + public abstract ApplicationAttemptId getApplicationAttemptId(); - public abstract void setAppAttemptId(ApplicationAttemptId atId); - public abstract void setAppId(ApplicationId appID); + @Private + @Unstable + public abstract void setApplicationAttemptId(ApplicationAttemptId atId); + + /** + * Get the identifier of the ContainerId. + * @return identifier of the ContainerId + */ + @Public + @Stable + public abstract int getId(); + + @Private + @Unstable public abstract void setId(int id); @@ -74,7 +103,7 @@ public abstract class ContainerId implements Comparable{ int result = 1; result = prime * result + getId(); result = prime * result - + ((getAppAttemptId() == null) ? 0 : getAppAttemptId().hashCode()); + + ((getApplicationAttemptId() == null) ? 0 : getApplicationAttemptId().hashCode()); return result; } @@ -85,7 +114,8 @@ public abstract class ContainerId implements Comparable{ } if (other.getClass().isAssignableFrom(this.getClass())) { ContainerId otherCId = (ContainerId)other; - if (this.getAppAttemptId().equals(otherCId.getAppAttemptId())) { + if (this.getApplicationAttemptId().equals( + otherCId.getApplicationAttemptId())) { return this.getId() == otherCId.getId(); } } @@ -94,10 +124,12 @@ public abstract class ContainerId implements Comparable{ @Override public int compareTo(ContainerId other) { - if (this.getAppAttemptId().compareTo(other.getAppAttemptId()) == 0) { + if (this.getApplicationAttemptId().compareTo( + other.getApplicationAttemptId()) == 0) { return this.getId() - other.getId(); } else { - return this.getAppAttemptId().compareTo(other.getAppAttemptId()); + return this.getApplicationAttemptId().compareTo( + other.getApplicationAttemptId()); } } @@ -105,10 +137,10 @@ public abstract class ContainerId implements Comparable{ @Override public String toString() { StringBuilder sb = new StringBuilder(); - ApplicationId appId = getAppId(); + ApplicationId appId = getApplicationAttemptId().getApplicationId(); sb.append("container_").append(appId.getClusterTimestamp()).append("_"); sb.append(appIdFormat.get().format(appId.getId())).append("_"); - sb.append(appAttemptIdFormat.get().format(getAppAttemptId(). + sb.append(appAttemptIdFormat.get().format(getApplicationAttemptId(). 
getAttemptId())).append("_"); sb.append(containerIdFormat.get().format(getId())); return sb.toString(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java index cf5c9d0d4e0..0339df9af1f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java @@ -22,10 +22,8 @@ import java.nio.ByteBuffer; import java.util.List; import java.util.Map; -import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; -import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.ContainerManager; /** @@ -121,100 +119,52 @@ public interface ContainerLaunchContext { void setContainerTokens(ByteBuffer containerToken); /** - * Get all LocalResource required by the container. + * Get LocalResource required by the container. * @return all LocalResource required by the container */ @Public @Stable - Map getAllLocalResources(); + Map getLocalResources(); - @Private - @Unstable - LocalResource getLocalResource(String key); - /** - * Add all LocalResource required by the container. + * Set LocalResource required by the container. * @param localResources LocalResource required by the container */ @Public @Stable - void addAllLocalResources(Map localResources); - - @Private - @Unstable - void setLocalResource(String key, LocalResource value); - - @Private - @Unstable - void removeLocalResource(String key); - - @Private - @Unstable - void clearLocalResources(); + void setLocalResources(Map localResources); /** - * Get application-specific binary service data. - * @return application-specific binary service data + * Get application-specific binary service data. + * @return application-specific binary service data */ @Public @Stable - Map getAllServiceData(); - - @Private - @Unstable - ByteBuffer getServiceData(String key); - - /** - * Add add application-specific binary service data. - * @param serviceData application-specific binary service data - */ - @Public - @Stable - void addAllServiceData(Map serviceData); - - @Private - @Unstable - void setServiceData(String key, ByteBuffer value); - - @Private - @Unstable - void removeServiceData(String key); - - @Private - @Unstable - void clearServiceData(); - - /** - * Get environment variables for the launched container. - * @return environment variables for the launched container - */ - @Public - @Stable - Map getAllEnv(); - - @Private - @Unstable - String getEnv(String key); + Map getServiceData(); /** - * Add environment variables for the launched container. - * @param env environment variables for the launched container + * Set application-specific binary service data. + * @param serviceData application-specific binary service data */ @Public @Stable - void addAllEnv(Map env); + void setServiceData(Map serviceData); - @Private - @Unstable - void setEnv(String key, String value); - - @Private - @Unstable - void removeEnv(String key); - - @Private - @Unstable - void clearEnv(); + /** + * Get environment variables for the container. 
+ * @return environment variables for the container + */ + @Public + @Stable + Map getEnv(); + + /** + * Add environment variables for the container. + * @param environment environment variables for the container + */ + @Public + @Stable + void setEnv(Map environment); /** * Get the list of commands for launching the container. @@ -222,15 +172,7 @@ public interface ContainerLaunchContext { */ @Public @Stable - List getCommandList(); - - @Private - @Unstable - String getCommand(int index); - - @Private - @Unstable - int getCommandCount(); + List getCommands(); /** * Add the list of commands for launching the container. @@ -238,17 +180,6 @@ public interface ContainerLaunchContext { */ @Public @Stable - void addAllCommands(List commands); + void setCommands(List commands); - @Private - @Unstable - void addCommand(String command); - - @Private - @Unstable - void removeCommand(int index); - - @Private - @Unstable - void clearCommands(); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java index 8a149d01fac..b7ff5371fc0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerState.java @@ -18,6 +18,16 @@ package org.apache.hadoop.yarn.api.records; +/** + *

State of a Container.

+ */ public enum ContainerState { - NEW, RUNNING, COMPLETE + /** New container */ + NEW, + + /** Running container */ + RUNNING, + + /** Completed container */ + COMPLETE } \ No newline at end of file diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java index c52a47c5486..e2dfc82c4b9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerStatus.java @@ -18,14 +18,81 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + *

ContainerStatus represents the current status of a
+ * Container.
+ *
+ * It provides details such as:
+ * <ul>
+ *   <li>ContainerId of the container.</li>
+ *   <li>ContainerState of the container.</li>
+ *   <li>Exit status of a completed container.</li>
+ *   <li>Diagnostic message for a failed container.</li>
+ * </ul>

+ */ +@Public +@Stable public interface ContainerStatus { + /** + * Get the ContainerId of the container. + * @return ContainerId of the container + */ + @Public + @Stable ContainerId getContainerId(); + + @Private + @Unstable + void setContainerId(ContainerId containerId); + + /** + * Get the ContainerState of the container. + * @return ContainerState of the container + */ + @Public + @Stable ContainerState getState(); - String getExitStatus(); + + @Private + @Unstable + void setState(ContainerState state); + + /** + *

Get the exit status for the container.
+ *
+ * Note: This is valid only for completed containers i.e. containers
+ * with state {@link ContainerState#COMPLETE}.
+ * Otherwise, it returns an invalid exit code equal to {@literal -1000}.
+ *
+ * Containers killed by the framework, either due to being released by
+ * the application or being 'lost' due to node failures etc., have a special
+ * exit code of {@literal -100}.
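A short sketch (not part of the patch) of how a caller might interpret the new int-valued exit status. The -1000/-100 values come from this javadoc rather than named API constants, and the assumption that 0 means success follows the usual process-exit convention.

```java
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;

public class ExitStatusSketch {
  public static void report(ContainerStatus status) {
    if (status.getState() != ContainerState.COMPLETE) {
      return; // exit status is only meaningful once the container has completed
    }
    int exitStatus = status.getExitStatus();
    if (exitStatus == -100) {
      System.out.println("Container released/lost: " + status.getDiagnostics());
    } else if (exitStatus != 0) {
      System.out.println("Container failed with " + exitStatus + ": " + status.getDiagnostics());
    }
  }
}
```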

+ * + * @return exit status for the container + */ + @Public + @Stable + int getExitStatus(); + + @Private + @Unstable + void setExitStatus(int exitStatus); + + /** + * Get diagnostic messages for failed containers. + * @return diagnostic messages for failed containers + */ + @Public + @Stable String getDiagnostics(); - void setContainerId(ContainerId containerId); - void setState(ContainerState state); - void setExitStatus(String exitStatus); + @Private + @Unstable void setDiagnostics(String diagnostics); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java index 52290dbd47b..da34f71f927 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java @@ -20,15 +20,76 @@ package org.apache.hadoop.yarn.api.records; import java.nio.ByteBuffer; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.AMRMProtocol; +import org.apache.hadoop.yarn.api.ContainerManager; + +/** + *

ContainerToken is the security token used by the framework
+ * to verify authenticity of any Container.
+ *
+ * The ResourceManager, on container allocation, provides a
+ * secure token which is verified by the NodeManager on
+ * container launch.
+ *
+ * Applications do not need to care about ContainerToken; it is
+ * transparently handled by the framework - the allocated
+ * Container includes the ContainerToken.

+ * + * @see AMRMProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) + * @see ContainerManager#startContainer(org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest) + */ +@Public +@Stable public interface ContainerToken { + /** + * Get the token identifier. + * @return token identifier + */ + @Public + @Stable public abstract ByteBuffer getIdentifier(); - public abstract ByteBuffer getPassword(); - public abstract String getKind(); - public abstract String getService(); + @Private + @Stable public abstract void setIdentifier(ByteBuffer identifier); + + /** + * Get the token password + * @return token password + */ + @Public + @Stable + public abstract ByteBuffer getPassword(); + + @Private + @Stable public abstract void setPassword(ByteBuffer password); + + /** + * Get the token kind. + * @return token kind + */ + @Public + @Stable + public abstract String getKind(); + + @Private + @Stable public abstract void setKind(String kind); + + /** + * Get the service to which the token is allocated. + * @return service to which the token is allocated + */ + @Public + @Stable + public abstract String getService(); + + @Private + @Stable public abstract void setService(String service); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java index a7cec29e1b2..b06af127acf 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/NodeId.java @@ -18,11 +18,43 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + *

NodeId is the unique identifier for a node.
+ *
+ * It includes the hostname and port to uniquely
+ * identify the node. Thus, it is unique across restarts of any
+ * NodeManager.

+ */ +@Public +@Stable public interface NodeId extends Comparable { + /** + * Get the hostname of the node. + * @return hostname of the node + */ + @Public + @Stable String getHost(); + + @Private + @Unstable void setHost(String host); + /** + * Get the port for communicating with the node. + * @return port for communicating with the node + */ + @Public + @Stable int getPort(); + + @Private + @Unstable void setPort(int port); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProtoBase.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProtoBase.java index c0c438b5642..c40e2338692 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProtoBase.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ProtoBase.java @@ -20,11 +20,15 @@ package org.apache.hadoop.yarn.api.records; import java.nio.ByteBuffer; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.util.ProtoUtils; import com.google.protobuf.ByteString; import com.google.protobuf.Message; +@Private +@Unstable public abstract class ProtoBase { public abstract T getProto(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java index eb5b681ede0..497c65ebe2c 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Resource.java @@ -18,10 +18,40 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.AMRMProtocol; + +/** + *

Resource models a set of computer resources in the
+ * cluster.
+ *
+ * Currently it only models memory.
+ *
+ * Typically, applications request Resource of suitable
+ * capability to run their component tasks.

+ * + * @see ResourceRequest + * @see AMRMProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) + */ +@Public +@Stable public interface Resource extends Comparable { + + /** + * Get memory of the resource. + * @return memory of the resource + */ + @Public + @Stable public abstract int getMemory(); + /** + * Set memory of the resource. + * @param memory memory of the resource + */ + @Public + @Stable public abstract void setMemory(int memory); - } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java index 79e29b91e82..4072da1b613 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ResourceRequest.java @@ -18,16 +18,107 @@ package org.apache.hadoop.yarn.api.records; -public interface ResourceRequest extends Comparable{ +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.yarn.api.AMRMProtocol; + +/** + *

ResourceRequest represents the request made by an
+ * application to the ResourceManager to obtain various
+ * Container allocations.
+ *
+ * It includes:
+ * <ul>
+ *   <li>{@link Priority} of the request.</li>
+ *   <li>The name of the machine or rack on which the allocation is
+ *       desired. A special value of * signifies that
+ *       any host/rack is acceptable to the application.</li>
+ *   <li>{@link Resource} required for each request.</li>
+ *   <li>Number of containers of such specifications which are required
+ *       by the application.</li>
+ * </ul>
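A minimal sketch (not part of the patch) of assembling such a request with the setters declared below; Priority is passed in rather than constructed, and the helper class is hypothetical.

```java
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.util.Records;

public class ResourceRequestSketch {
  // Asks for numContainers containers of the given memory on any host/rack ("*").
  public static ResourceRequest anyHost(Priority priority, int memory, int numContainers) {
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(memory); // Resource currently models memory only

    ResourceRequest request = Records.newRecord(ResourceRequest.class);
    request.setPriority(priority);
    request.setHostName("*");
    request.setCapability(capability);
    request.setNumContainers(numContainers);
    return request;
  }
}
```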

+ * + * @see Resource + * @see AMRMProtocol#allocate(org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest) + */ +@Public +@Stable +public interface ResourceRequest extends Comparable { + /** + * Get the Priority of the request. + * @return Priority of the request + */ + @Public + @Stable public abstract Priority getPriority(); + + /** + * Set the Priority of the request + * @param priority Priority of the request + */ + @Public + @Stable + public abstract void setPriority(Priority priority); + + /** + * Get the host/rack on which the allocation is desired. + * + * A special value of * signifies that any host/rack is + * acceptable. + * + * @return host/rack on which the allocation is desired + */ + @Public + @Stable public abstract String getHostName(); + + /** + * Set host/rack on which the allocation is desired. + * + * A special value of * signifies that any host/rack is + * acceptable. + * + * @param hostName host/rack on which the allocation is desired + */ + @Public + @Stable + public abstract void setHostName(String hostName); + + /** + * Get the Resource capability of the request. + * @return Resource capability of the request + */ + @Public + @Stable public abstract Resource getCapability(); + + /** + * Set the Resource capability of the request + * @param capability Resource capability of the request + */ + @Public + @Stable + public abstract void setCapability(Resource capability); + + /** + * Get the number of containers required with the given specifications. + * @return number of containers required with the given specifications + */ + @Public + @Stable public abstract int getNumContainers(); - public abstract void setPriority(Priority priority); - public abstract void setHostName(String hostName); - public abstract void setCapability(Resource capability); + /** + * Set the number of containers required with the given specifications + * @param numContainers number of containers required with the given + * specifications + */ + @Public + @Stable public abstract void setNumContainers(int numContainers); - - } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java index bdf4a7d285d..973302fff67 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/URL.java @@ -18,14 +18,77 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Evolving; + +/** + *

URL represents a serializable {@link java.net.URL}.

+ */ +@Public +@Evolving public interface URL { + + /** + * Get the scheme of the URL. + * @return scheme of the URL + */ + @Public + @Evolving public abstract String getScheme(); + + /** + * Set the scheme of the URL + * @param scheme scheme of the URL + */ + @Public + @Evolving + public abstract void setScheme(String scheme); + + /** + * Get the host of the URL. + * @return host of the URL + */ + @Public + @Evolving public abstract String getHost(); + + /** + * Set the host of the URL. + * @param host host of the URL + */ + @Public + @Evolving + public abstract void setHost(String host); + + /** + * Get the port of the URL. + * @return port of the URL + */ + @Public + @Evolving public abstract int getPort(); + + /** + * Set the port of the URL + * @param port port of the URL + */ + @Public + @Evolving + public abstract void setPort(int port); + + /** + * Get the file of the URL. + * @return file of the URL + */ + @Public + @Evolving public abstract String getFile(); - public abstract void setScheme(String scheme); - public abstract void setHost(String host); - public abstract void setPort(int port); + /** + * Set the file of the URL. + * @param file file of the URL + */ + @Public + @Evolving public abstract void setFile(String file); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java index 963cf068c4e..8ecf60c0edf 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/YarnClusterMetrics.java @@ -18,10 +18,30 @@ package org.apache.hadoop.yarn.api.records; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; +import org.apache.hadoop.classification.InterfaceStability.Unstable; + +/** + *

YarnClusterMetrics represents cluster metrics.
+ *
+ * Currently only number of NodeManagers is provided.
+ */ +@Public +@Stable public interface YarnClusterMetrics { + /** + * Get the number of NodeManagers in the cluster. + * @return number of NodeManagers in the cluster + */ + @Public + @Stable public abstract int getNumNodeManagers(); - + + @Private + @Unstable public abstract void setNumNodeManagers(int numNodeManagers); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java index 37d792f080f..3227ce8a903 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerIdPBImpl.java @@ -19,10 +19,8 @@ package org.apache.hadoop.yarn.api.records.impl.pb; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProtoOrBuilder; @@ -32,8 +30,7 @@ public class ContainerIdPBImpl extends ContainerId { ContainerIdProto.Builder builder = null; boolean viaProto = false; - private ApplicationId applicationId = null; - private ApplicationAttemptId appAttemptId = null; + private ApplicationAttemptId applicationAttemptId = null; public ContainerIdPBImpl() { builder = ContainerIdProto.newBuilder(); @@ -52,11 +49,10 @@ public class ContainerIdPBImpl extends ContainerId { } private synchronized void mergeLocalToBuilder() { - if (this.applicationId != null && !((ApplicationIdPBImpl)applicationId).getProto().equals(builder.getAppId())) { - builder.setAppId(convertToProtoFormat(this.applicationId)); - } - if (this.appAttemptId != null && !((ApplicationAttemptIdPBImpl)appAttemptId).getProto().equals(builder.getAppAttemptId())) { - builder.setAppAttemptId(convertToProtoFormat(this.appAttemptId)); + if (this.applicationAttemptId != null && ! + ((ApplicationAttemptIdPBImpl)applicationAttemptId).getProto().equals( + builder.getAppAttemptId())) { + builder.setAppAttemptId(convertToProtoFormat(this.applicationAttemptId)); } } @@ -87,61 +83,36 @@ public class ContainerIdPBImpl extends ContainerId { maybeInitBuilder(); builder.setId((id)); } - @Override - public synchronized ApplicationId getAppId() { - ContainerIdProtoOrBuilder p = viaProto ? proto : builder; - if (this.applicationId != null) { - return this.applicationId; - } - if (!p.hasAppId()) { - return null; - } - this.applicationId = convertFromProtoFormat(p.getAppId()); - return this.applicationId; - } + @Override - public synchronized ApplicationAttemptId getAppAttemptId() { + public synchronized ApplicationAttemptId getApplicationAttemptId() { ContainerIdProtoOrBuilder p = viaProto ? 
proto : builder; - if (this.appAttemptId != null) { - return this.appAttemptId; + if (this.applicationAttemptId != null) { + return this.applicationAttemptId; } if (!p.hasAppAttemptId()) { return null; } - this.appAttemptId = convertFromProtoFormat(p.getAppAttemptId()); - return this.appAttemptId; + this.applicationAttemptId = convertFromProtoFormat(p.getAppAttemptId()); + return this.applicationAttemptId; } @Override - public synchronized void setAppId(ApplicationId appId) { - maybeInitBuilder(); - if (appId == null) - builder.clearAppId(); - this.applicationId = appId; - } - - @Override - public synchronized void setAppAttemptId(ApplicationAttemptId atId) { + public synchronized void setApplicationAttemptId(ApplicationAttemptId atId) { maybeInitBuilder(); if (atId == null) builder.clearAppAttemptId(); - this.appAttemptId = atId; + this.applicationAttemptId = atId; } - private ApplicationAttemptIdPBImpl convertFromProtoFormat(ApplicationAttemptIdProto p) { + private ApplicationAttemptIdPBImpl convertFromProtoFormat( + ApplicationAttemptIdProto p) { return new ApplicationAttemptIdPBImpl(p); } - private ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) { + private ApplicationAttemptIdProto convertToProtoFormat( + ApplicationAttemptId t) { return ((ApplicationAttemptIdPBImpl)t).getProto(); } - - private ApplicationIdPBImpl convertFromProtoFormat(ApplicationIdProto p) { - return new ApplicationIdPBImpl(p); - } - - private ApplicationIdProto convertToProtoFormat(ApplicationId t) { - return ((ApplicationIdPBImpl)t).getProto(); - } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java index 07d1705aa14..0696d8327bd 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java @@ -41,8 +41,11 @@ import org.apache.hadoop.yarn.proto.YarnProtos.StringStringMapProto; -public class ContainerLaunchContextPBImpl extends ProtoBase implements ContainerLaunchContext { - ContainerLaunchContextProto proto = ContainerLaunchContextProto.getDefaultInstance(); +public class ContainerLaunchContextPBImpl +extends ProtoBase +implements ContainerLaunchContext { + ContainerLaunchContextProto proto = + ContainerLaunchContextProto.getDefaultInstance(); ContainerLaunchContextProto.Builder builder = null; boolean viaProto = false; @@ -72,10 +75,14 @@ public class ContainerLaunchContextPBImpl extends ProtoBase getCommandList() { + public List getCommands() { initCommands(); return this.commands; } - @Override - public String getCommand(int index) { - initCommands(); - return this.commands.get(index); - } - @Override - public int getCommandCount() { - initCommands(); - return this.commands.size(); - } - + private void initCommands() { if (this.commands != null) { return; @@ -161,11 +159,12 @@ public class ContainerLaunchContextPBImpl extends ProtoBase command) { - if (command == null) + public void setCommands(final List commands) { + if (commands == null) return; initCommands(); - this.commands.addAll(command); + this.commands.clear(); + this.commands.addAll(commands); } private void addCommandsToProto() { @@ 
-175,21 +174,7 @@ public class ContainerLaunchContextPBImpl extends ProtoBase getAllLocalResources() { + public Map getLocalResources() { initLocalResources(); return this.localResources; } - @Override - public LocalResource getLocalResource(String key) { - initLocalResources(); - return this.localResources.get(key); - } - + private void initLocalResources() { if (this.localResources != null) { return; @@ -253,10 +234,12 @@ public class ContainerLaunchContextPBImpl extends ProtoBase localResources) { + public void setLocalResources( + final Map localResources) { if (localResources == null) return; initLocalResources(); + this.localResources.clear(); this.localResources.putAll(localResources); } @@ -265,7 +248,8 @@ public class ContainerLaunchContextPBImpl extends ProtoBase iterable = new Iterable() { + Iterable iterable = + new Iterable() { @Override public Iterator iterator() { @@ -281,7 +265,8 @@ public class ContainerLaunchContextPBImpl extends ProtoBase getAllServiceData() { + public Map getServiceData() { initServiceData(); return this.serviceData; } - @Override - public ByteBuffer getServiceData(String key) { - initServiceData(); - return this.serviceData.get(key); - } private void initServiceData() { if (this.serviceData != null) { @@ -353,7 +320,7 @@ public class ContainerLaunchContextPBImpl extends ProtoBase serviceData) { + public void setServiceData(final Map serviceData) { if (serviceData == null) return; initServiceData(); @@ -365,7 +332,8 @@ public class ContainerLaunchContextPBImpl extends ProtoBase iterable = new Iterable() { + Iterable iterable = + new Iterable() { @Override public Iterator iterator() { @@ -381,7 +349,8 @@ public class ContainerLaunchContextPBImpl extends ProtoBase getAllEnv() { + public Map getEnv() { initEnv(); return this.env; } - @Override - public String getEnv(String key) { - initEnv(); - return this.env.get(key); - } private void initEnv() { if (this.env != null) { @@ -433,10 +383,11 @@ public class ContainerLaunchContextPBImpl extends ProtoBase env) { + public void setEnv(final Map env) { if (env == null) return; initEnv(); + this.env.clear(); this.env.putAll(env); } @@ -445,7 +396,8 @@ public class ContainerLaunchContextPBImpl extends ProtoBase iterable = new Iterable() { + Iterable iterable = + new Iterable() { @Override public Iterator iterator() { @@ -461,7 +413,8 @@ public class ContainerLaunchContextPBImpl extends ProtoBase implements ContainerStatus { +public class ContainerStatusPBImpl extends ProtoBase +implements ContainerStatus { ContainerStatusProto proto = ContainerStatusProto.getDefaultInstance(); ContainerStatusProto.Builder builder = null; boolean viaProto = false; @@ -116,13 +117,13 @@ public class ContainerStatusPBImpl extends ProtoBase imple this.containerId = containerId; } @Override - public String getExitStatus() { + public int getExitStatus() { ContainerStatusProtoOrBuilder p = viaProto ? 
proto : builder; - return (p.getExitStatus()); + return p.getExitStatus(); } @Override - public void setExitStatus(String exitStatus) { + public void setExitStatus(int exitStatus) { maybeInitBuilder(); builder.setExitStatus(exitStatus); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index 34d8396417c..61e3d1f5b94 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -250,7 +250,7 @@ message ContainerStatusProto { optional ContainerIdProto container_id = 1; optional ContainerStateProto state = 2; optional string diagnostics = 3 [default = "N/A"]; - optional string exit_status = 4 [default = "N/A"]; + optional int32 exit_status = 4 [default = -1000]; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 772c6688d4d..2169ee3e908 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -351,6 +351,8 @@ public class YarnConfiguration extends Configuration { public static final String NM_AUX_SERVICE_FMT = NM_PREFIX + "aux-services.%s.class"; + public static final int INVALID_CONTAINER_EXIT_STATUS = -1000; + public static final int ABORTED_CONTAINER_EXIT_STATUS = -100; public YarnConfiguration() { super(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java index 521ca27c770..c2510bca3df 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ContainerTokenIdentifier.java @@ -68,28 +68,42 @@ public class ContainerTokenIdentifier extends TokenIdentifier { @Override public void write(DataOutput out) throws IOException { LOG.debug("Writing ContainerTokenIdentifier to RPC layer"); - out.writeInt(this.containerId.getAppId().getId()); - out.writeInt(this.containerId.getAppAttemptId().getAttemptId()); + ApplicationAttemptId applicationAttemptId = + containerId.getApplicationAttemptId(); + ApplicationId applicationId = applicationAttemptId.getApplicationId(); + out.writeLong(applicationId.getClusterTimestamp()); + out.writeInt(applicationId.getId()); + out.writeInt(applicationAttemptId.getAttemptId()); out.writeInt(this.containerId.getId()); - // TODO: Cluster time-stamp? out.writeUTF(this.nmHostName); - out.writeInt(this.resource.getMemory()); // TODO: more resources. 
+ out.writeInt(this.resource.getMemory()); } @Override public void readFields(DataInput in) throws IOException { - this.containerId = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ContainerId.class); - this.containerId.setAppId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationId.class)); - this.containerId.setAppAttemptId(RecordFactoryProvider.getRecordFactory(null).newRecordInstance(ApplicationAttemptId.class)); - this.containerId.getAppId().setId(in.readInt()); - this.containerId.getAppAttemptId().setApplicationId(this.containerId.getAppId()); - this.containerId.getAppAttemptId().setAttemptId(in.readInt()); + this.containerId = + RecordFactoryProvider.getRecordFactory(null).newRecordInstance( + ContainerId.class); + ApplicationAttemptId applicationAttemptId = + RecordFactoryProvider.getRecordFactory(null).newRecordInstance( + ApplicationAttemptId.class); + ApplicationId applicationId = + RecordFactoryProvider.getRecordFactory(null).newRecordInstance( + ApplicationId.class); + applicationId.setClusterTimestamp(in.readLong()); + applicationId.setId(in.readInt()); + applicationAttemptId.setApplicationId(applicationId); + applicationAttemptId.setAttemptId(in.readInt()); + this.containerId.setApplicationAttemptId(applicationAttemptId); this.containerId.setId(in.readInt()); this.nmHostName = in.readUTF(); - this.resource = RecordFactoryProvider.getRecordFactory(null).newRecordInstance(Resource.class); - this.resource.setMemory(in.readInt()); // TODO: more resources. + this.resource = + RecordFactoryProvider.getRecordFactory(null).newRecordInstance( + Resource.class); + this.resource.setMemory(in.readInt()); } + @SuppressWarnings("static-access") @Override public Text getKind() { return this.KIND; diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java index cfff2fde827..4eb63c04470 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java @@ -152,9 +152,8 @@ public class BuilderUtils { public static ContainerId newContainerId(ApplicationAttemptId appAttemptId, int containerId) { ContainerId id = recordFactory.newRecordInstance(ContainerId.class); - id.setAppId(appAttemptId.getApplicationId()); id.setId(containerId); - id.setAppAttemptId(appAttemptId); + id.setApplicationAttemptId(appAttemptId); return id; } @@ -171,9 +170,8 @@ public class BuilderUtils { ApplicationId appId, ApplicationAttemptId appAttemptId, int containerId) { ContainerId id = recordFactory.newRecordInstance(ContainerId.class); - id.setAppId(appId); id.setId(containerId); - id.setAppAttemptId(appAttemptId); + id.setApplicationAttemptId(appAttemptId); return id; } @@ -181,8 +179,7 @@ public class BuilderUtils { ApplicationAttemptId appAttemptId, int containerId) { ContainerId id = recordFactory.newRecordInstance(ContainerId.class); - id.setAppAttemptId(appAttemptId); - id.setAppId(appAttemptId.getApplicationId()); + id.setApplicationAttemptId(appAttemptId); id.setId(containerId); return id; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java index 0d02cb96b48..ab6bd7395dc 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/ConverterUtils.java @@ -29,6 +29,7 @@ import java.util.Map; import java.util.Map.Entry; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.URL; @@ -130,6 +131,20 @@ public class ConverterUtils { return appId; } + private static ApplicationAttemptId toApplicationAttemptId( + RecordFactory recordFactory, + Iterator it) { + ApplicationId appId = + recordFactory.newRecordInstance(ApplicationId.class); + appId.setClusterTimestamp(Long.parseLong(it.next())); + appId.setId(Integer.parseInt(it.next())); + ApplicationAttemptId appAttemptId = + recordFactory.newRecordInstance(ApplicationAttemptId.class); + appAttemptId.setApplicationId(appId); + appAttemptId.setAttemptId(Integer.parseInt(it.next())); + return appAttemptId; + } + public static String toString(ContainerId cId) { return cId.toString(); } @@ -138,10 +153,11 @@ public class ConverterUtils { String containerIdStr) { Iterator it = _split(containerIdStr).iterator(); it.next(); // prefix. TODO: Validate container prefix - ApplicationId appID = toApplicationId(recordFactory, it); + ApplicationAttemptId appAttemptID = + toApplicationAttemptId(recordFactory, it); ContainerId containerId = recordFactory.newRecordInstance(ContainerId.class); - containerId.setAppId(appID); + containerId.setApplicationAttemptId(appAttemptID); containerId.setId(Integer.parseInt(it.next())); return containerId; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java index 948be4ef682..58efcc42307 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/TestRPC.java @@ -32,6 +32,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest; import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; @@ -81,21 +82,34 @@ public class TestRPC { ContainerManager proxy = (ContainerManager) rpc.getProxy(ContainerManager.class, NetUtils.createSocketAddr("localhost:" + server.getPort()), conf); - ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); + ContainerLaunchContext containerLaunchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); containerLaunchContext.setUser("dummy-user"); - containerLaunchContext.setContainerId(recordFactory.newRecordInstance(ContainerId.class)); - 
containerLaunchContext.getContainerId().setAppId(recordFactory.newRecordInstance(ApplicationId.class)); - containerLaunchContext.getContainerId().getAppId().setId(0); - containerLaunchContext.getContainerId().setId(100); - containerLaunchContext.setResource(recordFactory.newRecordInstance(Resource.class)); + ContainerId containerId = + recordFactory.newRecordInstance(ContainerId.class); + ApplicationId applicationId = + recordFactory.newRecordInstance(ApplicationId.class); + ApplicationAttemptId applicationAttemptId = + recordFactory.newRecordInstance(ApplicationAttemptId.class); + applicationId.setClusterTimestamp(0); + applicationId.setId(0); + applicationAttemptId.setApplicationId(applicationId); + applicationAttemptId.setAttemptId(0); + containerId.setApplicationAttemptId(applicationAttemptId); + containerId.setId(100); + containerLaunchContext.setContainerId(containerId); + containerLaunchContext.setResource( + recordFactory.newRecordInstance(Resource.class)); // containerLaunchContext.env = new HashMap(); // containerLaunchContext.command = new ArrayList(); - StartContainerRequest scRequest = recordFactory.newRecordInstance(StartContainerRequest.class); + StartContainerRequest scRequest = + recordFactory.newRecordInstance(StartContainerRequest.class); scRequest.setContainerLaunchContext(containerLaunchContext); proxy.startContainer(scRequest); - GetContainerStatusRequest gcsRequest = recordFactory.newRecordInstance(GetContainerStatusRequest.class); + GetContainerStatusRequest gcsRequest = + recordFactory.newRecordInstance(GetContainerStatusRequest.class); gcsRequest.setContainerId(containerLaunchContext.getContainerId()); GetContainerStatusResponse response = proxy.getContainerStatus(gcsRequest); ContainerStatus status = response.getStatus(); @@ -118,7 +132,7 @@ public class TestRPC { server.close(); Assert.assertNotNull(status); - Assert.assertEquals(ContainerState.RUNNING, status.getState().RUNNING); + Assert.assertEquals(ContainerState.RUNNING, status.getState()); } public class DummyContainerManager implements ContainerManager { @@ -126,28 +140,35 @@ public class TestRPC { private ContainerStatus status = null; @Override - public GetContainerStatusResponse getContainerStatus(GetContainerStatusRequest request) throws YarnRemoteException { - GetContainerStatusResponse response = recordFactory.newRecordInstance(GetContainerStatusResponse.class); + public GetContainerStatusResponse getContainerStatus( + GetContainerStatusRequest request) + throws YarnRemoteException { + GetContainerStatusResponse response = + recordFactory.newRecordInstance(GetContainerStatusResponse.class); response.setStatus(status); return response; } @Override - public StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException { + public StartContainerResponse startContainer(StartContainerRequest request) + throws YarnRemoteException { ContainerLaunchContext container = request.getContainerLaunchContext(); - StartContainerResponse response = recordFactory.newRecordInstance(StartContainerResponse.class); + StartContainerResponse response = + recordFactory.newRecordInstance(StartContainerResponse.class); status = recordFactory.newRecordInstance(ContainerStatus.class); status.setState(ContainerState.RUNNING); status.setContainerId(container.getContainerId()); - status.setExitStatus(String.valueOf(0)); + status.setExitStatus(0); return response; } @Override - public StopContainerResponse stopContainer(StopContainerRequest request) throws YarnRemoteException { + public 
StopContainerResponse stopContainer(StopContainerRequest request) + throws YarnRemoteException { Exception e = new Exception(EXCEPTION_MSG, new Exception(EXCEPTION_CAUSE)); - throw YarnRemoteExceptionFactoryProvider.getYarnRemoteExceptionFactory(null).createYarnRemoteException(e); + throw YarnRemoteExceptionFactoryProvider + .getYarnRemoteExceptionFactory(null).createYarnRemoteException(e); } } } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java index 8419bd21785..8dccb34c2a7 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-common/src/test/java/org/apache/hadoop/yarn/api/TestContainerId.java @@ -40,8 +40,7 @@ public class TestContainerId { ApplicationAttemptId appAttemptId = createAppAttemptId(appId, appAttemptIdInt); ContainerId containerId = Records.newRecord(ContainerId.class); - containerId.setAppAttemptId(appAttemptId); - containerId.setAppId(appId); + containerId.setApplicationAttemptId(appAttemptId); containerId.setId(containerIdInt); return containerId; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java index a251a7fd52d..a7e82a2d41a 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/DefaultContainerExecutor.java @@ -97,7 +97,9 @@ public class DefaultContainerExecutor extends ContainerExecutor { // create container dirs on all disks String containerIdStr = ConverterUtils.toString(containerId); String appIdStr = - ConverterUtils.toString(container.getContainerID().getAppId()); + ConverterUtils.toString( + container.getContainerID().getApplicationAttemptId(). 
+ getApplicationId()); String[] sLocalDirs = getConf().getStrings(YarnConfiguration.NM_LOCAL_DIRS, YarnConfiguration.DEFAULT_NM_LOCAL_DIRS); for (String sLocalDir : sLocalDirs) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java index f2d3a7cff51..8e90552d86e 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java @@ -250,7 +250,8 @@ public class ContainerManagerImpl extends CompositeService implements Container container = new ContainerImpl(this.dispatcher, launchContext, credentials, metrics); ContainerId containerID = launchContext.getContainerId(); - ApplicationId applicationID = containerID.getAppId(); + ApplicationId applicationID = + containerID.getApplicationAttemptId().getApplicationId(); if (context.getContainers().putIfAbsent(containerID, container) != null) { NMAuditLogger.logFailure(launchContext.getUser(), AuditConstants.START_CONTAINER, "ContainerManagerImpl", @@ -305,7 +306,8 @@ public class ContainerManagerImpl extends CompositeService implements NMAuditLogger.logFailure(userName, AuditConstants.STOP_CONTAINER, "ContainerManagerImpl", "Trying to stop unknown container!", - containerID.getAppId(), containerID); + containerID.getApplicationAttemptId().getApplicationId(), + containerID); return response; // Return immediately. } dispatcher.getEventHandler().handle( @@ -317,7 +319,8 @@ public class ContainerManagerImpl extends CompositeService implements // should be the same or should be rejected by auth before here. NMAuditLogger.logSuccess(container.getUser(), AuditConstants.STOP_CONTAINER, "ContainerManageImpl", - containerID.getAppId(), containerID); + containerID.getApplicationAttemptId().getApplicationId(), + containerID); // TODO: Move this code to appropriate place once kill_container is // implemented. 
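
With ApplicationId now folded under ApplicationAttemptId, a ContainerId is built by setting only the attempt id, and the owning application is reached through it. A minimal sketch of that pattern, mirroring the TestRPC and TestContainerId changes above; the wrapper class name and the numeric arguments are illustrative placeholders, not part of the patch:

    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ApplicationId;
    import org.apache.hadoop.yarn.api.records.ContainerId;
    import org.apache.hadoop.yarn.util.Records;

    public class ContainerIdExample {
      public static ContainerId newContainerId(long clusterTimestamp, int appId,
          int attempt, int container) {
        // Assemble the id hierarchy bottom-up.
        ApplicationId applicationId = Records.newRecord(ApplicationId.class);
        applicationId.setClusterTimestamp(clusterTimestamp);
        applicationId.setId(appId);

        ApplicationAttemptId appAttemptId =
            Records.newRecord(ApplicationAttemptId.class);
        appAttemptId.setApplicationId(applicationId);
        appAttemptId.setAttemptId(attempt);

        ContainerId containerId = Records.newRecord(ContainerId.class);
        containerId.setApplicationAttemptId(appAttemptId); // replaces setAppId()/setAppAttemptId()
        containerId.setId(container);
        return containerId;
      }

      public static void main(String[] args) {
        ContainerId cId = newContainerId(System.currentTimeMillis(), 1, 1, 100);
        // The owning ApplicationId is now reached through the attempt id.
        ApplicationId owner = cId.getApplicationAttemptId().getApplicationId();
        System.out.println(owner.getId() + " " + cId.getId());
      }
    }

This is the same chain the NodeManager code above switches to wherever it previously called containerId.getAppId().
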
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java index 288662155f1..6b8007f3fe7 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationContainerFinishedEvent.java @@ -25,7 +25,7 @@ public class ApplicationContainerFinishedEvent extends ApplicationEvent { public ApplicationContainerFinishedEvent( ContainerId containerID) { - super(containerID.getAppId(), + super(containerID.getApplicationAttemptId().getApplicationId(), ApplicationEventType.APPLICATION_CONTAINER_FINISHED); this.containerID = containerID; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java index 15c048a7fae..861d8579500 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/application/ApplicationInitEvent.java @@ -25,7 +25,7 @@ public class ApplicationInitEvent extends ApplicationEvent { private final Container container; public ApplicationInitEvent(Container container) { - super(container.getContainerID().getAppId(), + super(container.getContainerID().getApplicationAttemptId().getApplicationId(), ApplicationEventType.INIT_APPLICATION); this.container = container; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java index 5bdb96d6e28..4e02c3adede 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/ContainerImpl.java @@ -40,6 +40,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import 
org.apache.hadoop.yarn.api.records.LocalResourceVisibility; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -53,9 +54,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.Conta import org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher.ContainersLauncherEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.LocalResourceRequest; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationCleanupEvent; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.ContainerLocalizationRequestEvent; -import org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.event.LocalizationEventType; import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.event.LogAggregatorContainerFinishedEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStartMonitoringEvent; import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainerStopMonitoringEvent; @@ -75,7 +74,7 @@ public class ContainerImpl implements Container { private final Credentials credentials; private final NodeManagerMetrics metrics; private final ContainerLaunchContext launchContext; - private String exitCode = "NA"; + private int exitCode = YarnConfiguration.INVALID_CONTAINER_EXIT_STATUS; private final StringBuilder diagnostics; private static final Log LOG = LogFactory.getLog(Container.class); @@ -345,7 +344,7 @@ public class ContainerImpl implements Container { containerStatus.setState(getCurrentState()); containerStatus.setContainerId(this.launchContext.getContainerId()); containerStatus.setDiagnostics(diagnostics.toString()); - containerStatus.setExitStatus(String.valueOf(exitCode)); + containerStatus.setExitStatus(exitCode); return containerStatus; } finally { this.readLock.unlock(); @@ -360,7 +359,8 @@ public class ContainerImpl implements Container { metrics.completedContainer(); NMAuditLogger.logSuccess(getUser(), AuditConstants.FINISH_SUCCESS_CONTAINER, "ContainerImpl", - getContainerID().getAppId(), getContainerID()); + getContainerID().getApplicationAttemptId().getApplicationId(), + getContainerID()); break; case EXITED_WITH_FAILURE: metrics.endRunningContainer(); @@ -370,7 +370,8 @@ public class ContainerImpl implements Container { NMAuditLogger.logFailure(getUser(), AuditConstants.FINISH_FAILED_CONTAINER, "ContainerImpl", "Container failed with state: " + getContainerState(), - getContainerID().getAppId(), getContainerID()); + getContainerID().getApplicationAttemptId().getApplicationId(), + getContainerID()); break; case CONTAINER_CLEANEDUP_AFTER_KILL: metrics.endRunningContainer(); @@ -379,13 +380,15 @@ public class ContainerImpl implements Container { metrics.killedContainer(); NMAuditLogger.logSuccess(getUser(), AuditConstants.FINISH_KILLED_CONTAINER, "ContainerImpl", - getContainerID().getAppId(), getContainerID()); + getContainerID().getApplicationAttemptId().getApplicationId(), + getContainerID()); } metrics.releaseContainer(getLaunchContext().getResource()); // Inform the application ContainerId containerID = getContainerID(); + @SuppressWarnings("rawtypes") EventHandler eventHandler = dispatcher.getEventHandler(); 
eventHandler.handle(new ApplicationContainerFinishedEvent(containerID)); // Remove the container from the resource-monitor @@ -433,20 +436,21 @@ public class ContainerImpl implements Container { container.metrics.initingContainer(); // Inform the AuxServices about the opaque serviceData - Map csd = ctxt.getAllServiceData(); + Map csd = ctxt.getServiceData(); if (csd != null) { // This can happen more than once per Application as each container may // have distinct service data for (Map.Entry service : csd.entrySet()) { container.dispatcher.getEventHandler().handle( new AuxServicesEvent(AuxServicesEventType.APPLICATION_INIT, - ctxt.getUser(), ctxt.getContainerId().getAppId(), + ctxt.getUser(), + ctxt.getContainerId().getApplicationAttemptId().getApplicationId(), service.getKey().toString(), service.getValue())); } } // Send requests for public, private resources - Map cntrRsrc = ctxt.getAllLocalResources(); + Map cntrRsrc = ctxt.getLocalResources(); if (!cntrRsrc.isEmpty()) { try { for (Map.Entry rsrc : cntrRsrc.entrySet()) { @@ -562,7 +566,7 @@ public class ContainerImpl implements Container { @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerExitEvent exitEvent = (ContainerExitEvent) event; - container.exitCode = String.valueOf(exitEvent.getExitCode()); + container.exitCode = exitEvent.getExitCode(); // TODO: Add containerWorkDir to the deletion service. // TODO: Add containerOuputDir to the deletion service. @@ -640,7 +644,7 @@ public class ContainerImpl implements Container { @Override public void transition(ContainerImpl container, ContainerEvent event) { ContainerExitEvent exitEvent = (ContainerExitEvent) event; - container.exitCode = String.valueOf(exitEvent.getExitCode()); + container.exitCode = exitEvent.getExitCode(); // The process/process-grp is killed. 
Decrement reference counts and // cleanup resources diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java index 6a8e0f91d93..1a34247c306 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainerLaunch.java @@ -89,8 +89,8 @@ public class ContainerLaunch implements Callable { final Map localResources = container.getLocalizedResources(); String containerIdStr = ConverterUtils.toString(container.getContainerID()); final String user = launchContext.getUser(); - final Map env = launchContext.getAllEnv(); - final List command = launchContext.getCommandList(); + final Map env = launchContext.getEnv(); + final List command = launchContext.getCommands(); int ret = -1; try { @@ -107,10 +107,9 @@ public class ContainerLaunch implements Callable { newCmds.add(str.replace(ApplicationConstants.LOG_DIR_EXPANSION_VAR, containerLogDir.toUri().getPath())); } - launchContext.clearCommands(); - launchContext.addAllCommands(newCmds); + launchContext.setCommands(newCmds); - Map envs = launchContext.getAllEnv(); + Map envs = launchContext.getEnv(); Map newEnvs = new HashMap(envs.size()); for (Entry entry : envs.entrySet()) { newEnvs.put( @@ -119,8 +118,7 @@ public class ContainerLaunch implements Callable { ApplicationConstants.LOG_DIR_EXPANSION_VAR, containerLogDir.toUri().getPath())); } - launchContext.clearEnv(); - launchContext.addAllEnv(newEnvs); + launchContext.setEnv(newEnvs); // /////////////////////////// End of variable expansion FileContext lfs = FileContext.getLocalFSFileContext(); @@ -170,7 +168,7 @@ public class ContainerLaunch implements Callable { containerWorkDir, FINAL_CONTAINER_TOKENS_FILE).toUri().getPath()); writeLaunchEnv(containerScriptOutStream, env, localResources, - launchContext.getCommandList(), appDirs); + launchContext.getCommands(), appDirs); // /////////// End of writing out container-script // /////////// Write out the container-tokens in the nmPrivate space. 
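
The simplified ContainerLaunchContext surface replaces the per-entry and add/clear accessors with whole-collection get/set pairs, as the ContainerLaunch changes above show. A minimal usage sketch under that API; the wrapper class, the CLASSPATH value, and the commented-out job.jar entry are illustrative placeholders:

    import java.util.Arrays;
    import java.util.HashMap;
    import java.util.Map;

    import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
    import org.apache.hadoop.yarn.api.records.LocalResource;
    import org.apache.hadoop.yarn.util.Records;

    public class LaunchContextExample {
      public static ContainerLaunchContext newLaunchContext() {
        ContainerLaunchContext ctx =
            Records.newRecord(ContainerLaunchContext.class);

        // Local resources are set as a whole map rather than one entry at a time.
        Map<String, LocalResource> localResources =
            new HashMap<String, LocalResource>();
        // localResources.put("job.jar", jobJarResource); // resource construction elided
        ctx.setLocalResources(localResources);            // replaces setLocalResource(key, value)

        // Environment follows the same pattern.
        Map<String, String> environment = new HashMap<String, String>();
        environment.put("CLASSPATH", "./*");
        ctx.setEnv(environment);                          // replaces clearEnv()/addAllEnv()

        // Commands are set and read as a plain list.
        ctx.setCommands(Arrays.asList("/bin/ls", "-l"));  // replaces addAllCommands()/getCommandList()
        return ctx;
      }

      public static void main(String[] args) {
        ContainerLaunchContext ctx = newLaunchContext();
        for (String cmd : ctx.getCommands()) {
          System.out.println(cmd);
        }
      }
    }

Setters now clear and replace the backing collection, so callers that previously combined clear* and addAll* calls collapse to a single set* call.
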
diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java index ded92861387..048166ef25f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/launcher/ContainersLauncher.java @@ -103,7 +103,8 @@ public class ContainersLauncher extends AbstractService switch (event.getType()) { case LAUNCH_CONTAINER: Application app = - context.getApplications().get(containerId.getAppId()); + context.getApplications().get( + containerId.getApplicationAttemptId().getApplicationId()); ContainerLaunch launch = new ContainerLaunch(getConfig(), dispatcher, exec, app, event.getContainer()); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java index 4747360d287..d120b5ccdf9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/ResourceLocalizationService.java @@ -292,7 +292,7 @@ public class ResourceLocalizationService extends AbstractService for (Map.Entry> e : rsrcs.entrySet()) { tracker = getLocalResourcesTracker(e.getKey(), c.getUser(), - c.getContainerID().getAppId()); + c.getContainerID().getApplicationAttemptId().getApplicationId()); for (LocalResourceRequest req : e.getValue()) { tracker.handle(new ResourceRequestEvent(req, e.getKey(), ctxt)); } @@ -316,7 +316,7 @@ public class ResourceLocalizationService extends AbstractService for (Map.Entry> e : rsrcs.entrySet()) { tracker = getLocalResourcesTracker(e.getKey(), c.getUser(), - c.getContainerID().getAppId()); + c.getContainerID().getApplicationAttemptId().getApplicationId()); for (LocalResourceRequest req : e.getValue()) { tracker.handle(new ResourceReleaseEvent(req, c.getContainerID())); } @@ -326,7 +326,8 @@ public class ResourceLocalizationService extends AbstractService userName = c.getUser(); String containerIDStr = c.toString(); appIDStr = - ConverterUtils.toString(c.getContainerID().getAppId()); + ConverterUtils.toString( + c.getContainerID().getApplicationAttemptId().getApplicationId()); for (Path localDir : localDirs) { // Delete the user-owned container-dir @@ -789,7 +790,9 @@ public class ResourceLocalizationService extends AbstractService // 2) exec initApplication and wait exec.startLocalizer(nmPrivateCTokensPath, localizationServerAddress, context.getUser(), - 
ConverterUtils.toString(context.getContainerId().getAppId()), + ConverterUtils.toString( + context.getContainerId(). + getApplicationAttemptId().getApplicationId()), localizerId, localDirs); // TODO handle ExitCodeException separately? } catch (Exception e) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AggregatedLogFormat.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AggregatedLogFormat.java index fe4adb67bc6..eb22ce22b7f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AggregatedLogFormat.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/AggregatedLogFormat.java @@ -90,8 +90,11 @@ public class AggregatedLogFormat { public void write(DataOutputStream out) throws IOException { for (String rootLogDir : this.rootLogDirs) { File appLogDir = - new File(rootLogDir, ConverterUtils.toString(this.containerId - .getAppId())); + new File(rootLogDir, + ConverterUtils.toString( + this.containerId.getApplicationAttemptId(). + getApplicationId()) + ); File containerLogDir = new File(appLogDir, ConverterUtils.toString(this.containerId)); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java index 14ba5ef292a..974455c4602 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/LogAggregationService.java @@ -172,11 +172,13 @@ public class LogAggregationService extends AbstractService implements // A container is complete. Put this containers' logs up for aggregation if // this containers' logs are needed. 
- if (!this.appLogAggregators.containsKey(containerId.getAppId())) { + if (!this.appLogAggregators.containsKey( + containerId.getApplicationAttemptId().getApplicationId())) { throw new YarnException("Application is not initialized yet for " + containerId); } - this.appLogAggregators.get(containerId.getAppId()) + this.appLogAggregators.get( + containerId.getApplicationAttemptId().getApplicationId()) .startContainerLogAggregation(containerId, exitCode.equals("0")); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorContainerFinishedEvent.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorContainerFinishedEvent.java index 117d30cf196..68ec27a73a0 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorContainerFinishedEvent.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/event/LogAggregatorContainerFinishedEvent.java @@ -23,10 +23,10 @@ import org.apache.hadoop.yarn.api.records.ContainerId; public class LogAggregatorContainerFinishedEvent extends LogAggregatorEvent { private final ContainerId containerId; - private final String exitCode; + private final int exitCode; public LogAggregatorContainerFinishedEvent(ContainerId containerId, - String exitCode) { + int exitCode) { super(LogAggregatorEventType.CONTAINER_FINISHED); this.containerId = containerId; this.exitCode = exitCode; @@ -36,7 +36,7 @@ public class LogAggregatorContainerFinishedEvent extends LogAggregatorEvent { return this.containerId; } - public String getExitCode() { + public int getExitCode() { return this.exitCode; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java index afaca61ac13..68b0686a254 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java @@ -95,7 +95,8 @@ public class ContainerLogsPage extends NMView { logFile = new File(this.logsSelector .getLocalPathToRead( - ConverterUtils.toString(containerId.getAppId()) + ConverterUtils.toString( + containerId.getApplicationAttemptId().getApplicationId()) + Path.SEPARATOR + $(CONTAINER_ID) + Path.SEPARATOR + $(CONTAINER_LOG_TYPE), this.conf).toUri() @@ -176,7 +177,9 @@ public class ContainerLogsPage extends NMView { conf.getStrings(YarnConfiguration.NM_LOG_DIRS, YarnConfiguration.DEFAULT_NM_LOG_DIRS); List containerLogDirs = new ArrayList(logDirs.length); for (String logDir : logDirs) { - String appIdStr = ConverterUtils.toString(containerId.getAppId()); + String appIdStr = + 
ConverterUtils.toString( + containerId.getApplicationAttemptId().getApplicationId()); File appLogDir = new File(logDir, appIdStr); String containerIdStr = ConverterUtils.toString(containerId); containerLogDirs.add(new File(appLogDir, containerIdStr)); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java index 1f53817b09f..27be38a0299 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerPage.java @@ -25,6 +25,7 @@ import static org.apache.hadoop.yarn.util.StringHelper.ujoin; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.nodemanager.Context; @@ -69,10 +70,14 @@ public class ContainerPage extends NMView implements NMWebParams { ConverterUtils.toContainerId(this.recordFactory, $(CONTAINER_ID)); Container container = this.nmContext.getContainers().get(containerID); ContainerStatus containerData = container.cloneAndGetContainerStatus(); + int exitCode = containerData.getExitStatus(); + String exiStatus = + (exitCode == YarnConfiguration.INVALID_CONTAINER_EXIT_STATUS) ? 
+ "N/A" : String.valueOf(exitCode); info("Container information") ._("ContainerID", $(CONTAINER_ID)) ._("ContainerState", container.getContainerState()) - ._("ExitStatus", containerData.getExitStatus()) + ._("ExitStatus", exiStatus) ._("Diagnostics", containerData.getDiagnostics()) ._("User", container.getUser()) ._("TotalMemoryNeeded", diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java index 63848f5980d..7fa43b7c785 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java @@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.nodemanager; import java.io.File; import java.io.IOException; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.NodeHealthCheckerService; import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; @@ -37,7 +35,6 @@ import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.event.Dispatcher; -import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.server.security.ContainerTokenSecretManager; @@ -49,8 +46,8 @@ import org.junit.Test; public class TestEventFlow { - private static final Log LOG = LogFactory.getLog(TestEventFlow.class); - private static final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); + private static final RecordFactory recordFactory = + RecordFactoryProvider.getRecordFactory(null); private static File localDir = new File("target", TestEventFlow.class.getName() + "-localDir").getAbsoluteFile(); @@ -77,7 +74,8 @@ public class TestEventFlow { YarnConfiguration conf = new YarnConfiguration(); conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath()); conf.set(YarnConfiguration.NM_LOG_DIRS, localLogDir.getAbsolutePath()); - conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, remoteLogDir.getAbsolutePath()); + conf.set(YarnConfiguration.NM_REMOTE_APP_LOG_DIR, + remoteLogDir.getAbsolutePath()); ContainerExecutor exec = new DefaultContainerExecutor(); exec.setConf(conf); @@ -100,27 +98,36 @@ public class TestEventFlow { }; DummyContainerManager containerManager = - new DummyContainerManager(context, exec, del, nodeStatusUpdater, metrics, containerTokenSecretManager); + new DummyContainerManager(context, exec, del, nodeStatusUpdater, + metrics, containerTokenSecretManager); containerManager.init(conf); containerManager.start(); - ContainerLaunchContext launchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); + ContainerLaunchContext launchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); ContainerId cID = recordFactory.newRecordInstance(ContainerId.class); - cID.setAppId(recordFactory.newRecordInstance(ApplicationId.class)); - ApplicationAttemptId 
atId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - atId.setApplicationId(cID.getAppId()); - cID.setAppAttemptId(atId); + ApplicationId applicationId = + recordFactory.newRecordInstance(ApplicationId.class); + applicationId.setClusterTimestamp(0); + applicationId.setId(0); + ApplicationAttemptId applicationAttemptId = + recordFactory.newRecordInstance(ApplicationAttemptId.class); + applicationAttemptId.setApplicationId(applicationId); + applicationAttemptId.setAttemptId(0); + cID.setApplicationAttemptId(applicationAttemptId); launchContext.setContainerId(cID); launchContext.setUser("testing"); launchContext.setResource(recordFactory.newRecordInstance(Resource.class)); - StartContainerRequest request = recordFactory.newRecordInstance(StartContainerRequest.class); + StartContainerRequest request = + recordFactory.newRecordInstance(StartContainerRequest.class); request.setContainerLaunchContext(launchContext); containerManager.startContainer(request); BaseContainerManagerTest.waitForContainerState(containerManager, cID, ContainerState.RUNNING); - StopContainerRequest stopRequest = recordFactory.newRecordInstance(StopContainerRequest.class); + StopContainerRequest stopRequest = + recordFactory.newRecordInstance(StopContainerRequest.class); stopRequest.setContainerId(cID); containerManager.stopContainer(stopRequest); BaseContainerManagerTest.waitForContainerState(containerManager, cID, diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java index 9ff888e6142..2f0e3f54d37 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeStatusUpdater.java @@ -134,7 +134,8 @@ public class TestNodeStatusUpdater { Map> map = new HashMap>(); for (ContainerStatus cs : containers) { - ApplicationId applicationId = cs.getContainerId().getAppId(); + ApplicationId applicationId = + cs.getContainerId().getApplicationAttemptId().getApplicationId(); List appContainers = map.get(applicationId); if (appContainers == null) { appContainers = new ArrayList(); @@ -159,8 +160,7 @@ public class TestNodeStatusUpdater { // Give a container to the NM. applicationID.setId(heartBeatID); appAttemptID.setApplicationId(applicationID); - firstContainerID.setAppId(applicationID); - firstContainerID.setAppAttemptId(appAttemptID); + firstContainerID.setApplicationAttemptId(appAttemptID); firstContainerID.setId(heartBeatID); ContainerLaunchContext launchContext = recordFactory .newRecordInstance(ContainerLaunchContext.class); @@ -184,8 +184,7 @@ public class TestNodeStatusUpdater { // Give another container to the NM. 
applicationID.setId(heartBeatID); appAttemptID.setApplicationId(applicationID); - secondContainerID.setAppId(applicationID); - secondContainerID.setAppAttemptId(appAttemptID); + secondContainerID.setApplicationAttemptId(appAttemptID); secondContainerID.setId(heartBeatID); ContainerLaunchContext launchContext = recordFactory .newRecordInstance(ContainerLaunchContext.class); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java index 97890819c8b..87460d045ce 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java @@ -23,7 +23,11 @@ import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; +import java.util.ArrayList; import java.util.Arrays; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import junit.framework.Assert; @@ -67,6 +71,20 @@ public class TestContainerManager extends BaseContainerManagerTest { LOG = LogFactory.getLog(TestContainerManager.class); } + private ContainerId createContainerId() { + ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); + appId.setClusterTimestamp(0); + appId.setId(0); + ApplicationAttemptId appAttemptId = + recordFactory.newRecordInstance(ApplicationAttemptId.class); + appAttemptId.setApplicationId(appId); + appAttemptId.setAttemptId(1); + ContainerId containerId = + recordFactory.newRecordInstance(ContainerId.class); + containerId.setApplicationAttemptId(appAttemptId); + return containerId; + } + @Test public void testContainerManagerInitialization() throws IOException { @@ -75,14 +93,9 @@ public class TestContainerManager extends BaseContainerManagerTest { // Just do a query for a non-existing container. 
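The hunks above all make the same change: a ContainerId is no longer wired up with setAppId() and setAppAttemptId(); it carries a single ApplicationAttemptId, which in turn carries the ApplicationId. A minimal sketch of the new construction, using only the record APIs exercised in this patch; the helper class and its name are illustrative and not part of the change:

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;

// Illustrative helper (not part of the patch) showing the new ContainerId wiring.
public class ContainerIdSketch {
  public static ContainerId newContainerId(long clusterTimestamp, int appId,
      int attemptId, int containerId) {
    RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null);
    ApplicationId applicationId =
        recordFactory.newRecordInstance(ApplicationId.class);
    applicationId.setClusterTimestamp(clusterTimestamp);
    applicationId.setId(appId);
    ApplicationAttemptId applicationAttemptId =
        recordFactory.newRecordInstance(ApplicationAttemptId.class);
    applicationAttemptId.setApplicationId(applicationId);
    applicationAttemptId.setAttemptId(attemptId);
    ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
    cId.setApplicationAttemptId(applicationAttemptId); // replaces setAppId()/setAppAttemptId()
    cId.setId(containerId);
    return cId;
  }
}

The owning application is then reached as cId.getApplicationAttemptId().getApplicationId(), which is the navigation the later hunks in this patch switch to.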
boolean throwsException = false; try { - GetContainerStatusRequest request = recordFactory.newRecordInstance(GetContainerStatusRequest.class); - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - appAttemptId.setApplicationId(appId); - appAttemptId.setAttemptId(1); - ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(appId); - cId.setAppAttemptId(appAttemptId); + GetContainerStatusRequest request = + recordFactory.newRecordInstance(GetContainerStatusRequest.class); + ContainerId cId = createContainerId(); request.setContainerId(cId); containerManager.getContainerStatus(request); } catch (YarnRemoteException e) { @@ -107,20 +120,14 @@ public class TestContainerManager extends BaseContainerManagerTest { ContainerLaunchContext container = recordFactory.newRecordInstance(ContainerLaunchContext.class); // ////// Construct the Container-id - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - appAttemptId.setApplicationId(appId); - appAttemptId.setAttemptId(1); - ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(appId); - cId.setAppAttemptId(appAttemptId); + ContainerId cId = createContainerId(); container.setContainerId(cId); container.setUser(user); // ////// Construct the container-spec. - ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); -// containerLaunchContext.resources = new HashMap(); + ContainerLaunchContext containerLaunchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(file.getAbsolutePath()))); @@ -131,14 +138,17 @@ public class TestContainerManager extends BaseContainerManagerTest { rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(file.lastModified()); String destinationFile = "dest_file"; - containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha); + Map localResources = + new HashMap(); + localResources.put(destinationFile, rsrc_alpha); + containerLaunchContext.setLocalResources(localResources); containerLaunchContext.setUser(container.getUser()); containerLaunchContext.setContainerId(container.getContainerId()); containerLaunchContext.setResource(recordFactory .newRecordInstance(Resource.class)); -// containerLaunchContext.command = new ArrayList(); - StartContainerRequest startRequest = recordFactory.newRecordInstance(StartContainerRequest.class); + StartContainerRequest startRequest = + recordFactory.newRecordInstance(StartContainerRequest.class); startRequest.setContainerLaunchContext(containerLaunchContext); containerManager.startContainer(startRequest); @@ -147,7 +157,7 @@ public class TestContainerManager extends BaseContainerManagerTest { ContainerState.COMPLETE); // Now ascertain that the resources are localised correctly. - // TODO: Don't we need clusterStamp in localDir? 
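ContainerLaunchContext follows the same get/set-only shape: the per-entry setLocalResource(name, resource) call is gone, so the tests assemble a complete map and install it with setLocalResources(). A rough sketch of that pattern, assuming only the record types and setters visible in these hunks; the resource name and sizes are placeholders:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.URL;
import org.apache.hadoop.yarn.factories.RecordFactory;

// Illustrative only: the bulk setter replaces repeated setLocalResource(name, rsrc) calls.
public class LocalResourcesSketch {
  public static void addSingleResource(ContainerLaunchContext context,
      RecordFactory recordFactory, URL resourceUrl, long size, long timestamp) {
    LocalResource rsrc = recordFactory.newRecordInstance(LocalResource.class);
    rsrc.setResource(resourceUrl);
    rsrc.setSize(size);
    rsrc.setVisibility(LocalResourceVisibility.APPLICATION);
    rsrc.setType(LocalResourceType.FILE);
    rsrc.setTimestamp(timestamp);
    Map<String, LocalResource> localResources =
        new HashMap<String, LocalResource>();
    localResources.put("dest_file", rsrc);       // name is a placeholder
    context.setLocalResources(localResources);   // replaces setLocalResource(name, rsrc)
  }
}

Handing over the whole map at once matches the plain get/set value-object shape these records now have.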
+ ApplicationId appId = cId.getApplicationAttemptId().getApplicationId(); String appIDStr = ConverterUtils.toString(appId); String containerIDStr = ConverterUtils.toString(cId); File userCacheDir = new File(localDir, ContainerLocalizer.USERCACHE); @@ -187,41 +197,41 @@ public class TestContainerManager extends BaseContainerManagerTest { PrintWriter fileWriter = new PrintWriter(scriptFile); File processStartFile = new File(tmpDir, "start_file.txt").getAbsoluteFile(); - fileWriter.write("\numask 0"); // So that start file is readable by the test. + fileWriter.write("\numask 0"); // So that start file is readable by the test fileWriter.write("\necho Hello World! > " + processStartFile); fileWriter.write("\necho $$ >> " + processStartFile); fileWriter.write("\nexec sleep 100"); fileWriter.close(); - ContainerLaunchContext containerLaunchContext = recordFactory.newRecordInstance(ContainerLaunchContext.class); + ContainerLaunchContext containerLaunchContext = + recordFactory.newRecordInstance(ContainerLaunchContext.class); // ////// Construct the Container-id - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - appAttemptId.setApplicationId(appId); - appAttemptId.setAttemptId(1); - ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(appId); - cId.setAppAttemptId(appAttemptId); + ContainerId cId = createContainerId(); containerLaunchContext.setContainerId(cId); containerLaunchContext.setUser(user); -// containerLaunchContext.resources =new HashMap(); URL resource_alpha = ConverterUtils.getYarnUrlFromPath(localFS .makeQualified(new Path(scriptFile.getAbsolutePath()))); - LocalResource rsrc_alpha = recordFactory.newRecordInstance(LocalResource.class); + LocalResource rsrc_alpha = + recordFactory.newRecordInstance(LocalResource.class); rsrc_alpha.setResource(resource_alpha); rsrc_alpha.setSize(-1); rsrc_alpha.setVisibility(LocalResourceVisibility.APPLICATION); rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file"; - containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha); + Map localResources = + new HashMap(); + localResources.put(destinationFile, rsrc_alpha); + containerLaunchContext.setLocalResources(localResources); containerLaunchContext.setUser(containerLaunchContext.getUser()); - containerLaunchContext.addCommand("/bin/bash"); - containerLaunchContext.addCommand(scriptFile.getAbsolutePath()); + List commands = new ArrayList(); + commands.add("/bin/bash"); + commands.add(scriptFile.getAbsolutePath()); + containerLaunchContext.setCommands(commands); containerLaunchContext.setResource(recordFactory .newRecordInstance(Resource.class)); containerLaunchContext.getResource().setMemory(100 * 1024 * 1024); @@ -264,10 +274,12 @@ public class TestContainerManager extends BaseContainerManagerTest { BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); - GetContainerStatusRequest gcsRequest = recordFactory.newRecordInstance(GetContainerStatusRequest.class); + GetContainerStatusRequest gcsRequest = + recordFactory.newRecordInstance(GetContainerStatusRequest.class); gcsRequest.setContainerId(cId); - ContainerStatus containerStatus = containerManager.getContainerStatus(gcsRequest).getStatus(); - Assert.assertEquals(String.valueOf(ExitCode.KILLED.getExitCode()), + ContainerStatus containerStatus = + 
containerManager.getContainerStatus(gcsRequest).getStatus(); + Assert.assertEquals(ExitCode.KILLED.getExitCode(), containerStatus.getExitStatus()); // Assert that the process is not alive anymore @@ -300,13 +312,8 @@ public class TestContainerManager extends BaseContainerManagerTest { ContainerLaunchContext container = recordFactory.newRecordInstance(ContainerLaunchContext.class); // ////// Construct the Container-id - ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); - appAttemptId.setApplicationId(appId); - appAttemptId.setAttemptId(1); - ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(appId); - cId.setAppAttemptId(appAttemptId); + ContainerId cId = createContainerId(); + ApplicationId appId = cId.getApplicationAttemptId().getApplicationId(); container.setContainerId(cId); container.setUser(user); @@ -325,7 +332,10 @@ public class TestContainerManager extends BaseContainerManagerTest { rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(file.lastModified()); String destinationFile = "dest_file"; - containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha); + Map localResources = + new HashMap(); + localResources.put(destinationFile, rsrc_alpha); + containerLaunchContext.setLocalResources(localResources); containerLaunchContext.setUser(container.getUser()); containerLaunchContext.setContainerId(container.getContainerId()); containerLaunchContext.setResource(recordFactory @@ -340,7 +350,8 @@ public class TestContainerManager extends BaseContainerManagerTest { BaseContainerManagerTest.waitForContainerState(containerManager, cId, ContainerState.COMPLETE); - BaseContainerManagerTest.waitForApplicationState(containerManager, cId.getAppId(), + BaseContainerManagerTest.waitForApplicationState(containerManager, + cId.getApplicationAttemptId().getApplicationId(), ApplicationState.RUNNING); // Now ascertain that the resources are localised correctly. @@ -372,7 +383,8 @@ public class TestContainerManager extends BaseContainerManagerTest { containerManager.handle(new CMgrCompletedAppsEvent(Arrays .asList(new ApplicationId[] { appId }))); - BaseContainerManagerTest.waitForApplicationState(containerManager, cId.getAppId(), + BaseContainerManagerTest.waitForApplicationState(containerManager, + cId.getApplicationAttemptId().getApplicationId(), ApplicationState.FINISHED); // Now ascertain that the resources are localised correctly. diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java index 54dc85a98f1..04d400ad18d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/container/TestContainer.java @@ -418,7 +418,7 @@ public class TestContainer { } else { localResources = Collections. 
emptyMap(); } - when(ctxt.getAllLocalResources()).thenReturn(localResources); + when(ctxt.getLocalResources()).thenReturn(localResources); if (withServiceData) { Random r = new Random(); @@ -429,7 +429,7 @@ public class TestContainer { } else { serviceData = Collections. emptyMap(); } - when(ctxt.getAllServiceData()).thenReturn(serviceData); + when(ctxt.getServiceData()).thenReturn(serviceData); c = newContainer(dispatcher, ctxt); dispatcher.start(); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java index 2a9f44530ab..a1c6bb84793 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalizedResource.java @@ -19,6 +19,7 @@ package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import org.apache.hadoop.fs.Path; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.LocalResource; @@ -48,9 +49,12 @@ public class TestLocalizedResource { ApplicationId appId = mock(ApplicationId.class); when(appId.getClusterTimestamp()).thenReturn(314159265L); when(appId.getId()).thenReturn(3); + ApplicationAttemptId appAttemptId = mock(ApplicationAttemptId.class); + when(appAttemptId.getApplicationId()).thenReturn(appId); + when(appAttemptId.getAttemptId()).thenReturn(0); ContainerId container = mock(ContainerId.class); when(container.getId()).thenReturn(id); - when(container.getAppId()).thenReturn(appId); + when(container.getApplicationAttemptId()).thenReturn(appAttemptId); return container; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java index 53ac9405970..164039e0373 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java @@ -25,8 +25,10 @@ import java.io.FileWriter; import java.io.IOException; import java.io.PrintWriter; import java.io.Writer; +import java.util.ArrayList; import java.util.Arrays; import java.util.HashMap; +import java.util.List; import java.util.Map; import junit.framework.Assert; @@ -118,8 +120,8 @@ public class 
TestLogAggregationService extends BaseContainerManagerTest { BuilderUtils.newContainerId(recordFactory, application1, appAttemptId, 1); // Simulate log-file creation writeContainerLogs(app1LogDir, container11); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container11, "0")); + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container11, 0)); logAggregationService.handle(new LogAggregatorAppFinishedEvent( application1)); @@ -192,17 +194,19 @@ public class TestLogAggregationService extends BaseContainerManagerTest { application1, this.user, null, ContainerLogsRetentionPolicy.ALL_CONTAINERS)); - ApplicationAttemptId appAttemptId1 = recordFactory.newRecordInstance(ApplicationAttemptId.class); + ApplicationAttemptId appAttemptId1 = + recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId1.setApplicationId(application1); ContainerId container11 = BuilderUtils.newContainerId(recordFactory, application1, appAttemptId1, 1); // Simulate log-file creation writeContainerLogs(app1LogDir, container11); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container11, "0")); + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container11, 0)); ApplicationId application2 = BuilderUtils.newApplicationId(1234, 2); - ApplicationAttemptId appAttemptId2 = recordFactory.newRecordInstance(ApplicationAttemptId.class); + ApplicationAttemptId appAttemptId2 = + recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId1.setApplicationId(application2); File app2LogDir = @@ -214,19 +218,22 @@ public class TestLogAggregationService extends BaseContainerManagerTest { ContainerId container21 = - BuilderUtils.newContainerId(recordFactory, application2, appAttemptId2, 1); + BuilderUtils.newContainerId(recordFactory, application2, + appAttemptId2, 1); writeContainerLogs(app2LogDir, container21); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container21, "0")); + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container21, 0)); ContainerId container12 = - BuilderUtils.newContainerId(recordFactory, application1, appAttemptId1, 2); + BuilderUtils.newContainerId(recordFactory, application1, appAttemptId1, + 2); writeContainerLogs(app1LogDir, container12); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container12, "0")); + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container12, 0)); ApplicationId application3 = BuilderUtils.newApplicationId(1234, 3); - ApplicationAttemptId appAttemptId3 = recordFactory.newRecordInstance(ApplicationAttemptId.class); + ApplicationAttemptId appAttemptId3 = + recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId1.setApplicationId(application3); File app3LogDir = @@ -237,28 +244,32 @@ public class TestLogAggregationService extends BaseContainerManagerTest { ContainerLogsRetentionPolicy.AM_AND_FAILED_CONTAINERS_ONLY)); ContainerId container31 = - BuilderUtils.newContainerId(recordFactory, application3, appAttemptId3, 1); + BuilderUtils.newContainerId(recordFactory, application3, appAttemptId3, + 1); writeContainerLogs(app3LogDir, container31); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container31, "0")); + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container31, 0)); ContainerId container32 = - BuilderUtils.newContainerId(recordFactory, application3, appAttemptId3, 2); + 
BuilderUtils.newContainerId(recordFactory, application3, appAttemptId3, + 2); writeContainerLogs(app3LogDir, container32); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container32, "1")); // Failed container + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container32, 1)); // Failed ContainerId container22 = - BuilderUtils.newContainerId(recordFactory, application2, appAttemptId2, 2); + BuilderUtils.newContainerId(recordFactory, application2, appAttemptId2, + 2); writeContainerLogs(app2LogDir, container22); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container22, "0")); + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container22, 0)); ContainerId container33 = - BuilderUtils.newContainerId(recordFactory, application3, appAttemptId3, 3); + BuilderUtils.newContainerId(recordFactory, application3, appAttemptId3, + 3); writeContainerLogs(app3LogDir, container33); - logAggregationService.handle(new LogAggregatorContainerFinishedEvent( - container33, "0")); + logAggregationService.handle( + new LogAggregatorContainerFinishedEvent(container33, 0)); logAggregationService.handle(new LogAggregatorAppFinishedEvent( application2)); @@ -387,8 +398,15 @@ public class TestLogAggregationService extends BaseContainerManagerTest { // ////// Construct the Container-id ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); + appId.setClusterTimestamp(0); + appId.setId(0); + ApplicationAttemptId appAttemptId = + recordFactory.newRecordInstance(ApplicationAttemptId.class); + appAttemptId.setApplicationId(appId); + appAttemptId.setAttemptId(1); ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(appId); + cId.setId(0); + cId.setApplicationAttemptId(appAttemptId); containerLaunchContext.setContainerId(cId); containerLaunchContext.setUser(this.user); @@ -404,10 +422,15 @@ public class TestLogAggregationService extends BaseContainerManagerTest { rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file"; - containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha); + Map localResources = + new HashMap(); + localResources.put(destinationFile, rsrc_alpha); + containerLaunchContext.setLocalResources(localResources); containerLaunchContext.setUser(containerLaunchContext.getUser()); - containerLaunchContext.addCommand("/bin/bash"); - containerLaunchContext.addCommand(scriptFile.getAbsolutePath()); + List commands = new ArrayList(); + commands.add("/bin/bash"); + commands.add(scriptFile.getAbsolutePath()); + containerLaunchContext.setCommands(commands); containerLaunchContext.setResource(recordFactory .newRecordInstance(Resource.class)); containerLaunchContext.getResource().setMemory(100 * 1024 * 1024); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java index 466b864b9ae..4a2a49c8151 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java +++ 
b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java @@ -26,6 +26,10 @@ import java.io.File; import java.io.FileReader; import java.io.IOException; import java.io.PrintWriter; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.List; +import java.util.Map; import java.util.regex.Pattern; import junit.framework.Assert; @@ -192,13 +196,15 @@ public class TestContainersMonitor extends BaseContainerManagerTest { // ////// Construct the Container-id ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class); - ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); + appId.setClusterTimestamp(0); + appId.setId(0); + ApplicationAttemptId appAttemptId = + recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appId); appAttemptId.setAttemptId(1); ContainerId cId = recordFactory.newRecordInstance(ContainerId.class); - cId.setAppId(appId); cId.setId(0); - cId.setAppAttemptId(appAttemptId); + cId.setApplicationAttemptId(appAttemptId); containerLaunchContext.setContainerId(cId); containerLaunchContext.setUser(user); @@ -214,10 +220,15 @@ public class TestContainersMonitor extends BaseContainerManagerTest { rsrc_alpha.setType(LocalResourceType.FILE); rsrc_alpha.setTimestamp(scriptFile.lastModified()); String destinationFile = "dest_file"; - containerLaunchContext.setLocalResource(destinationFile, rsrc_alpha); + Map localResources = + new HashMap(); + localResources.put(destinationFile, rsrc_alpha); + containerLaunchContext.setLocalResources(localResources); containerLaunchContext.setUser(containerLaunchContext.getUser()); - containerLaunchContext.addCommand("/bin/bash"); - containerLaunchContext.addCommand(scriptFile.getAbsolutePath()); + List commands = new ArrayList(); + commands.add("/bin/bash"); + commands.add(scriptFile.getAbsolutePath()); + containerLaunchContext.setCommands(commands); containerLaunchContext.setResource(recordFactory .newRecordInstance(Resource.class)); containerLaunchContext.getResource().setMemory(8 * 1024 * 1024); @@ -251,7 +262,7 @@ public class TestContainersMonitor extends BaseContainerManagerTest { gcsRequest.setContainerId(cId); ContainerStatus containerStatus = containerManager.getContainerStatus(gcsRequest).getStatus(); - Assert.assertEquals(String.valueOf(ExitCode.KILLED.getExitCode()), + Assert.assertEquals(ExitCode.KILLED.getExitCode(), containerStatus.getExitStatus()); String expectedMsgPattern = "Container \\[pid=" + pid + ",containerID=" + cId diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java index 9a0750becb6..f84af413bcc 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServer.java @@ -112,7 +112,9 @@ public class TestNMWebServer { }; nmContext.getContainers().put(containerId, container); //TODO: Gross hack. 
Fix in code. - nmContext.getApplications().get(containerId.getAppId()).getContainers() + ApplicationId applicationId = + containerId.getApplicationAttemptId().getApplicationId(); + nmContext.getApplications().get(applicationId).getContainers() .put(containerId, container); writeContainerLogs(conf, nmContext, containerId); diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java index 22a5aa2942f..1a10993bb08 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java @@ -79,7 +79,8 @@ public class AMLauncher implements Runnable { private final RMAppAttempt application; private final Configuration conf; - private final RecordFactory recordFactory = RecordFactoryProvider.getRecordFactory(null); + private final RecordFactory recordFactory = + RecordFactoryProvider.getRecordFactory(null); private final ApplicationTokenSecretManager applicationTokenSecretManager; private final ClientToAMSecretManager clientToAMSecretManager; private final AMLauncherEventType eventType; @@ -87,9 +88,9 @@ public class AMLauncher implements Runnable { @SuppressWarnings("rawtypes") private final EventHandler handler; - @SuppressWarnings("unchecked") public AMLauncher(RMContext rmContext, RMAppAttempt application, - AMLauncherEventType eventType,ApplicationTokenSecretManager applicationTokenSecretManager, + AMLauncherEventType eventType, + ApplicationTokenSecretManager applicationTokenSecretManager, ClientToAMSecretManager clientToAMSecretManager, Configuration conf) { this.application = application; this.conf = new Configuration(conf); // Just not to touch the sec-info class @@ -106,7 +107,8 @@ public class AMLauncher implements Runnable { ContainerId masterContainerID = application.getMasterContainer().getId(); containerMgrProxy = - getContainerMgrProxy(masterContainerID.getAppId()); + getContainerMgrProxy( + masterContainerID.getApplicationAttemptId().getApplicationId()); } private void launch() throws IOException { @@ -169,12 +171,12 @@ public class AMLauncher implements Runnable { // Construct the actual Container ContainerLaunchContext container = recordFactory.newRecordInstance(ContainerLaunchContext.class); - container.addAllCommands(applicationMasterContext.getCommandList()); + container.setCommands(applicationMasterContext.getCommandList()); StringBuilder mergedCommand = new StringBuilder(); String failCount = Integer.toString(application.getAppAttemptId() .getAttemptId()); List commandList = new ArrayList(); - for (String str : container.getCommandList()) { + for (String str : container.getCommands()) { // This is out-right wrong. AM FAIL count should be passed via env. 
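The surrounding AMLauncher hunk applies the same convention to commands and environment: addCommand(), clearCommands(), addAllCommands() and addAllEnv() are replaced by building the list or map locally and installing it once with setCommands() and setEnv(). A minimal sketch assuming only those setters; the command and environment values are placeholders:

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;

// Illustrative only: wholesale setters replace the removed add*/clear* mutators.
public class CommandsAndEnvSketch {
  public static void populate(ContainerLaunchContext container) {
    List<String> commands = new ArrayList<String>();
    commands.add("/bin/bash");
    commands.add("my_script.sh");            // placeholder script path
    container.setCommands(commands);         // replaces addCommand()/addAllCommands()/clearCommands()

    Map<String, String> environment = new HashMap<String, String>();
    environment.put("EXAMPLE_ENV", "value"); // placeholder entry
    container.setEnv(environment);           // replaces addAllEnv()
  }
}

The same setCommands() call is what the node-manager tests above now use in place of addCommand().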
String result = str.replaceFirst(ApplicationConstants.AM_FAIL_COUNT_STRING, @@ -182,21 +184,21 @@ public class AMLauncher implements Runnable { mergedCommand.append(result).append(" "); commandList.add(result); } - container.clearCommands(); - container.addAllCommands(commandList); + container.setCommands(commandList); /** add the failed count to the app master command line */ LOG.info("Command to launch container " + containerID + " : " + mergedCommand); - container.addAllEnv(applicationMasterContext.getAllEnvironment()); - - container.addAllEnv(setupTokensInEnv(applicationMasterContext)); + Map environment = + applicationMasterContext.getAllEnvironment(); + environment.putAll(setupTokensInEnv(applicationMasterContext)); + container.setEnv(environment); // Construct the actual Container container.setContainerId(containerID); container.setUser(applicationMasterContext.getUser()); container.setResource(applicationMasterContext.getMasterCapability()); - container.addAllLocalResources(applicationMasterContext.getAllResourcesTodo()); + container.setLocalResources(applicationMasterContext.getAllResourcesTodo()); container.setContainerTokens(applicationMasterContext.getFsTokensTodo()); return container; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java index 5f12348d85d..d1ef1d14004 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/ApplicationMasterLauncher.java @@ -37,7 +37,6 @@ public class ApplicationMasterLauncher extends AbstractService implements private static final Log LOG = LogFactory.getLog( ApplicationMasterLauncher.class); private final ThreadPoolExecutor launcherPool; - private final EventHandler handler; private LauncherThread launcherHandlingThread; private final BlockingQueue masterEvents @@ -52,7 +51,6 @@ public class ApplicationMasterLauncher extends AbstractService implements RMContext context) { super(ApplicationMasterLauncher.class.getName()); this.context = context; - this.handler = context.getDispatcher().getEventHandler(); /* register to dispatcher */ this.context.getDispatcher().register(AMLauncherEventType.class, this); this.launcherPool = new ThreadPoolExecutor(1, 10, 1, @@ -67,14 +65,16 @@ public class ApplicationMasterLauncher extends AbstractService implements super.start(); } - protected Runnable createRunnableLauncher(RMAppAttempt application, AMLauncherEventType event) { + protected Runnable createRunnableLauncher(RMAppAttempt application, + AMLauncherEventType event) { Runnable launcher = new AMLauncher(context, application, event, applicationTokenSecretManager, clientToAMSecretManager, getConfig()); return launcher; } private void launch(RMAppAttempt application) { - Runnable launcher = createRunnableLauncher(application, AMLauncherEventType.LAUNCH); + Runnable launcher = createRunnableLauncher(application, + AMLauncherEventType.LAUNCH); masterEvents.add(launcher); } diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java index 971341445b8..f2e401a6ba4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/ZKStore.java @@ -49,9 +49,7 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationSubmissionContextProto import org.apache.hadoop.yarn.proto.YarnProtos.ContainerProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.NodeReportProto; -import org.apache.hadoop.yarn.server.resourcemanager.ResourceTrackerService; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; -import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNodeImpl; import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; @@ -180,7 +178,8 @@ public class ZKStore implements Store { } private String containerPathFromContainerId(ContainerId containerId) { - String appString = ConverterUtils.toString(containerId.getAppId()); + String appString = ConverterUtils.toString( + containerId.getApplicationAttemptId().getApplicationId()); return appString + "/" + containerId.getId(); } @@ -197,7 +196,10 @@ public class ZKStore implements Store { ContainerPBImpl containerPBImpl = (ContainerPBImpl) container; try { - zkClient.setData(APPS + ConverterUtils.toString(container.getId().getAppId()) + + zkClient.setData(APPS + + ConverterUtils.toString( + container.getId().getApplicationAttemptId().getApplicationId()) + + ZK_PATH_SEPARATOR + APP_MASTER_CONTAINER , containerPBImpl.getProto().toByteArray(), -1); } catch(InterruptedException ie) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java index 739b8b6a6cb..b4037aaeaf7 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApp.java @@ -215,7 +215,8 @@ public class SchedulerApp { Resources.addTo(currentConsumption, container.getResource()); if (LOG.isDebugEnabled()) { - LOG.debug("allocate: applicationId=" + container.getId().getAppId() + LOG.debug("allocate: applicationAttemptId=" + + container.getId().getApplicationAttemptId() + " container=" + container.getId() + " host=" + container.getNodeId().getHost() + " type=" + type); } diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java index a6664780357..7e51841495f 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerNode.java @@ -198,8 +198,8 @@ public class SchedulerNode { } // Cannot reserve more than one application on a given node! - if (!this.reservedContainer.getContainer().getId().getAppAttemptId().equals( - reservedContainer.getContainer().getId().getAppAttemptId())) { + if (!this.reservedContainer.getContainer().getId().getApplicationAttemptId().equals( + reservedContainer.getContainer().getId().getApplicationAttemptId())) { throw new IllegalStateException("Trying to reserve" + " container " + reservedContainer + " for application " + application.getApplicationId() + @@ -221,7 +221,7 @@ public class SchedulerNode { public synchronized void unreserveResource(SchedulerApp application) { // Cannot unreserve for wrong application... ApplicationAttemptId reservedApplication = - reservedContainer.getContainer().getId().getAppAttemptId(); + reservedContainer.getContainer().getId().getApplicationAttemptId(); if (!reservedApplication.equals( application.getApplicationAttemptId())) { throw new IllegalStateException("Trying to unreserve " + diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java index 5b588d1ab6a..8dfe799a041 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerUtils.java @@ -22,6 +22,7 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; @@ -65,7 +66,8 @@ public class SchedulerUtils { recordFactory.newRecordInstance(ContainerStatus.class); containerStatus.setContainerId(containerId); containerStatus.setDiagnostics(diagnostics); - containerStatus.setExitStatus("ABORTED"); + containerStatus.setExitStatus( + YarnConfiguration.ABORTED_CONTAINER_EXIT_STATUS); containerStatus.setState(ContainerState.COMPLETE); return containerStatus; } diff --git 
a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java index 8f44f1031af..abbe0365bb9 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/CapacityScheduler.java @@ -580,14 +580,15 @@ implements ResourceScheduler, CapacitySchedulerContext { } else { LOG.info("Skipping scheduling since node " + nm + " is reserved by application " + - node.getReservedContainer().getContainerId().getAppId()); + node.getReservedContainer().getContainerId().getApplicationAttemptId() + ); } } private void containerLaunchedOnNode(ContainerId containerId, SchedulerNode node) { // Get the application for the finished container - ApplicationAttemptId applicationAttemptId = containerId.getAppAttemptId(); + ApplicationAttemptId applicationAttemptId = containerId.getApplicationAttemptId(); SchedulerApp application = getApplication(applicationAttemptId); if (application == null) { LOG.info("Unknown application: " + applicationAttemptId + @@ -704,7 +705,7 @@ implements ResourceScheduler, CapacitySchedulerContext { Container container = rmContainer.getContainer(); // Get the application for the finished container - ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId(); + ApplicationAttemptId applicationAttemptId = container.getId().getApplicationAttemptId(); SchedulerApp application = getApplication(applicationAttemptId); if (application == null) { LOG.info("Container " + container + " of" + @@ -739,7 +740,7 @@ implements ResourceScheduler, CapacitySchedulerContext { private RMContainer getRMContainer(ContainerId containerId) { SchedulerApp application = - getApplication(containerId.getAppAttemptId()); + getApplication(containerId.getApplicationAttemptId()); return (application == null) ? 
null : application.getRMContainer(containerId); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java index b99b9a67134..9b4b3169ff4 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/FifoScheduler.java @@ -645,7 +645,7 @@ public class FifoScheduler implements ResourceScheduler { private void containerLaunchedOnNode(ContainerId containerId, SchedulerNode node) { // Get the application for the finished container - ApplicationAttemptId applicationAttemptId = containerId.getAppAttemptId(); + ApplicationAttemptId applicationAttemptId = containerId.getApplicationAttemptId(); SchedulerApp application = getApplication(applicationAttemptId); if (application == null) { LOG.info("Unknown application: " + applicationAttemptId + @@ -667,7 +667,7 @@ public class FifoScheduler implements ResourceScheduler { // Get the application for the finished container Container container = rmContainer.getContainer(); - ApplicationAttemptId applicationAttemptId = container.getId().getAppAttemptId(); + ApplicationAttemptId applicationAttemptId = container.getId().getApplicationAttemptId(); SchedulerApp application = getApplication(applicationAttemptId); // Get the node on which the container was allocated @@ -751,7 +751,7 @@ public class FifoScheduler implements ResourceScheduler { private RMContainer getRMContainer(ContainerId containerId) { SchedulerApp application = - getApplication(containerId.getAppAttemptId()); + getApplication(containerId.getApplicationAttemptId()); return (application == null) ? 
null : application.getRMContainer(containerId); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java index 242a815270a..7536857b2cf 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java @@ -56,7 +56,7 @@ public class MockNM { public void containerStatus(Container container) throws Exception { Map> conts = new HashMap>(); - conts.put(container.getId().getAppId(), + conts.put(container.getId().getApplicationAttemptId().getApplicationId(), Arrays.asList(new ContainerStatus[] { container.getContainerStatus() })); nodeHeartbeat(conts, true); } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java index 5f2b47c9ff1..a7b5d02c914 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java @@ -155,11 +155,15 @@ public class NodeManager implements ContainerManager { } @Override - synchronized public StartContainerResponse startContainer(StartContainerRequest request) throws YarnRemoteException { - ContainerLaunchContext containerLaunchContext = request.getContainerLaunchContext(); + synchronized public StartContainerResponse startContainer( + StartContainerRequest request) + throws YarnRemoteException { + ContainerLaunchContext containerLaunchContext = + request.getContainerLaunchContext(); - ApplicationId applicationId = containerLaunchContext.getContainerId() - .getAppId(); + ApplicationId applicationId = + containerLaunchContext.getContainerId().getApplicationAttemptId(). 
+ getApplicationId(); List applicationContainers = containers.get(applicationId); if (applicationContainers == null) { @@ -169,7 +173,8 @@ public class NodeManager implements ContainerManager { // Sanity check for (Container container : applicationContainers) { - if (container.getId().compareTo(containerLaunchContext.getContainerId()) == 0) { + if (container.getId().compareTo(containerLaunchContext.getContainerId()) + == 0) { throw new IllegalStateException( "Container " + containerLaunchContext.getContainerId() + " already setup on node " + containerManagerAddress); @@ -209,7 +214,8 @@ public class NodeManager implements ContainerManager { synchronized public StopContainerResponse stopContainer(StopContainerRequest request) throws YarnRemoteException { ContainerId containerID = request.getContainerId(); - String applicationId = String.valueOf(containerID.getAppId().getId()); + String applicationId = String.valueOf( + containerID.getApplicationAttemptId().getApplicationId().getId()); // Mark the container as COMPLETE List applicationContainers = containers.get(applicationId); @@ -259,7 +265,9 @@ public class NodeManager implements ContainerManager { @Override synchronized public GetContainerStatusResponse getContainerStatus(GetContainerStatusRequest request) throws YarnRemoteException { ContainerId containerId = request.getContainerId(); - List appContainers = containers.get(containerId.getAppId()); + List appContainers = + containers.get( + containerId.getApplicationAttemptId().getApplicationId()); Container container = null; for (Container c : appContainers) { if (c.getId().equals(containerId)) { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index 95690f0f9bc..fcc2c08344d 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -27,19 +27,13 @@ import org.apache.hadoop.yarn.api.records.ApplicationMaster; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationState; import org.apache.hadoop.yarn.api.records.ApplicationStatus; -import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; -import org.apache.hadoop.yarn.api.records.Resource; -import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager; import org.apache.hadoop.yarn.server.resourcemanager.recovery.ApplicationsStore.ApplicationStore; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.Store.RMState; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.AMLivelinessMonitor; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; 
-import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.util.Records; import com.google.common.collect.Lists; @@ -218,10 +212,10 @@ public abstract class MockAsm extends MockApps { } public static RMApp newApplication(int i) { - final ApplicationId id = newAppID(i); + final ApplicationAttemptId appAttemptId = newAppAttemptID(newAppID(i), 0); final Container masterContainer = Records.newRecord(Container.class); ContainerId containerId = Records.newRecord(ContainerId.class); - containerId.setAppId(id); + containerId.setApplicationAttemptId(appAttemptId); masterContainer.setId(containerId); masterContainer.setNodeHttpAddress("node:port"); final String user = newUserName(); @@ -233,7 +227,7 @@ public abstract class MockAsm extends MockApps { return new ApplicationBase() { @Override public ApplicationId getApplicationId() { - return id; + return appAttemptId.getApplicationId(); } @Override public String getUser() { diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java index 26eb89524ac..84dbbac8676 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestUtils.java @@ -154,8 +154,8 @@ public class TestUtils { public static ContainerId getMockContainerId(SchedulerApp application) { ContainerId containerId = mock(ContainerId.class); - doReturn(application.getApplicationAttemptId()).when(containerId).getAppAttemptId(); - doReturn(application.getApplicationId()).when(containerId).getAppId(); + doReturn(application.getApplicationAttemptId()). 
+ when(containerId).getApplicationAttemptId(); doReturn(application.getNewContainerId()).when(containerId).getId(); return containerId; } diff --git a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java index 66a582435ab..32148982774 100644 --- a/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java +++ b/hadoop-mapreduce-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerTokenSecretManager.java @@ -224,8 +224,6 @@ public class TestContainerTokenSecretManager { RegisterApplicationMasterRequest request = recordFactory .newRecordInstance(RegisterApplicationMasterRequest.class); - ApplicationMaster applicationMaster = recordFactory - .newRecordInstance(ApplicationMaster.class); request.setApplicationAttemptId(resourceManager.getRMContext() .getRMApps().get(appID).getCurrentAppAttempt().getAppAttemptId()); scheduler.registerApplicationMaster(request); @@ -293,12 +291,13 @@ public class TestContainerTokenSecretManager { .newRecordInstance(GetContainerStatusRequest.class); ContainerId containerID = recordFactory.newRecordInstance(ContainerId.class); - ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); + ApplicationAttemptId appAttemptId = + recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appID); appAttemptId.setAttemptId(1); - containerID.setAppId(appID); + appAttemptId.setApplicationId(appID); + containerID.setApplicationAttemptId(appAttemptId); containerID.setId(1); - containerID.setAppAttemptId(appAttemptId); request.setContainerId(containerID); client.getContainerStatus(request); } catch (YarnRemoteException e) { @@ -347,9 +346,9 @@ public class TestContainerTokenSecretManager { ApplicationAttemptId appAttemptId = recordFactory.newRecordInstance(ApplicationAttemptId.class); appAttemptId.setApplicationId(appID); appAttemptId.setAttemptId(1); - containerID.setAppId(appID); + appAttemptId.setApplicationId(appID); + containerID.setApplicationAttemptId(appAttemptId); containerID.setId(1); - containerID.setAppAttemptId(appAttemptId); request.setContainerId(containerID); try { client.getContainerStatus(request);