YARN-2152. Added missing information into ContainerTokenIdentifier so that NodeManagers can report the same to RM when RM restarts. Contributed by Jian He.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1605205 13f79535-47bb-0310-9956-ffa450edef68
Vinod Kumar Vavilapalli 2014-06-24 21:43:22 +00:00
parent 0d91576ec3
commit e285b98f0f
37 changed files with 207 additions and 144 deletions
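For quick reference, a minimal sketch of the extended ContainerTokenIdentifier constructor introduced by this change, which now carries the container's priority and creation time in addition to the existing fields. The class and variable names below are illustrative, the sample values are arbitrary, and the sketch assumes the hadoop-yarn-api and hadoop-yarn-common jars built from this revision are on the classpath; it mirrors the updated test code further down in the diff.

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

public class ContainerTokenIdentifierSketch {
  public static void main(String[] args) {
    ApplicationId appId = ApplicationId.newInstance(123456789L, 1);
    ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
    ContainerId containerId = ContainerId.newInstance(attemptId, 1);

    // The two trailing arguments (priority, creation time) are the fields
    // added by YARN-2152.
    ContainerTokenIdentifier tokenId =
        new ContainerTokenIdentifier(containerId, "localhost:1234", "user",
            Resource.newInstance(1024, 1),
            System.currentTimeMillis() + 10000L, 42, 42L,
            Priority.newInstance(0), System.currentTimeMillis());

    System.out.println(tokenId.getPriority());     // priority 0
    System.out.println(tokenId.getCreationTime()); // the creation timestamp
  }
}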

View File

@ -24,8 +24,6 @@ import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.EnumSet;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -92,6 +90,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.event.EventHandler;
@ -101,6 +100,7 @@ import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
import org.apache.hadoop.yarn.util.Clock;
import org.apache.hadoop.yarn.util.SystemClock;
import org.junit.Assert;
/**
@ -573,7 +573,8 @@ public class MRApp extends MRAppMaster {
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(cId, nodeId.toString(), "user",
resource, System.currentTimeMillis() + 10000, 42, 42);
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken = newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);
Container container = Container.newInstance(cId, nodeId,

View File

@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.client.api.impl.ContainerManagementProtocolProxy.ContainerManagementProtocolProxyData;
@ -402,7 +403,7 @@ public class TestContainerLauncherImpl {
1234), "password".getBytes(), new ContainerTokenIdentifier(
contId, containerManagerAddr, "user",
Resource.newInstance(1024, 1),
currentTime + 10000L, 123, currentTime));
currentTime + 10000L, 123, currentTime, Priority.newInstance(0), 0));
}
private static class ContainerManagerForTest implements ContainerManagementProtocol {

View File

@ -186,6 +186,9 @@ Release 2.5.0 - UNRELEASED
YARN-2072. RM/NM UIs and webservices are missing vcore information.
(Nathan Roberts via tgraves)
YARN-2152. Added missing information into ContainerTokenIdentifier so that
NodeManagers can report the same to RM when RM restarts. (Jian He via vinodkv)
OPTIMIZATIONS
BUG FIXES

View File

@ -35,7 +35,7 @@ import org.apache.hadoop.yarn.util.Records;
* <li>Allocated Resources to the container.</li>
* <li>Assigned Node id.</li>
* <li>Assigned Priority.</li>
* <li>Start Time.</li>
* <li>Creation Time.</li>
* <li>Finish Time.</li>
* <li>Container Exit Status.</li>
* <li>{@link ContainerState} of the container.</li>
@ -53,14 +53,14 @@ public abstract class ContainerReport {
@Unstable
public static ContainerReport newInstance(ContainerId containerId,
Resource allocatedResource, NodeId assignedNode, Priority priority,
long startTime, long finishTime, String diagnosticInfo, String logUrl,
long creationTime, long finishTime, String diagnosticInfo, String logUrl,
int containerExitStatus, ContainerState containerState) {
ContainerReport report = Records.newRecord(ContainerReport.class);
report.setContainerId(containerId);
report.setAllocatedResource(allocatedResource);
report.setAssignedNode(assignedNode);
report.setPriority(priority);
report.setStartTime(startTime);
report.setCreationTime(creationTime);
report.setFinishTime(finishTime);
report.setDiagnosticsInfo(diagnosticInfo);
report.setLogUrl(logUrl);
@ -122,17 +122,17 @@ public abstract class ContainerReport {
public abstract void setPriority(Priority priority);
/**
* Get the Start time of the container.
* Get the creation time of the container.
*
* @return Start time of the container
* @return creation time of the container
*/
@Public
@Unstable
public abstract long getStartTime();
public abstract long getCreationTime();
@Public
@Unstable
public abstract void setStartTime(long startTime);
public abstract void setCreationTime(long creationTime);
/**
* Get the Finish time of the container.
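ContainerReport's start-time accessor is renamed to creation time in this change. A minimal sketch of building a report against the new signature (class name and sample values are illustrative; Records.newRecord resolves the protobuf implementation when hadoop-yarn-common is on the classpath):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;

public class ContainerReportSketch {
  public static void main(String[] args) {
    ContainerId containerId = ContainerId.newInstance(
        ApplicationAttemptId.newInstance(
            ApplicationId.newInstance(1234L, 1), 1), 1);

    // The fifth argument is now the creation time; getStartTime()/setStartTime()
    // are renamed to getCreationTime()/setCreationTime().
    ContainerReport report = ContainerReport.newInstance(containerId,
        Resource.newInstance(1024, 1), NodeId.newInstance("host", 1234),
        Priority.newInstance(0), 1234L, 5678L, "diagnostics", "logUrl",
        0, ContainerState.COMPLETE);

    System.out.println(report.getCreationTime()); // 1234
  }
}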

View File

@ -92,7 +92,7 @@ message ContainerReportProto {
optional ResourceProto resource = 2;
optional NodeIdProto node_id = 3;
optional PriorityProto priority = 4;
optional int64 start_time = 5;
optional int64 creation_time = 5;
optional int64 finish_time = 6;
optional string diagnostics_info = 7 [default = "N/A"];
optional string log_url = 8;

View File

@ -304,7 +304,7 @@ public class ApplicationCLI extends YarnCLI {
containerReportStr.print("\tContainer-Id : ");
containerReportStr.println(containerReport.getContainerId());
containerReportStr.print("\tStart-Time : ");
containerReportStr.println(containerReport.getStartTime());
containerReportStr.println(containerReport.getCreationTime());
containerReportStr.print("\tFinish-Time : ");
containerReportStr.println(containerReport.getFinishTime());
containerReportStr.print("\tState : ");
@ -525,7 +525,7 @@ public class ApplicationCLI extends YarnCLI {
"Finish Time", "State", "Host", "LOG-URL");
for (ContainerReport containerReport : appsReport) {
writer.printf(CONTAINER_PATTERN, containerReport.getContainerId(),
containerReport.getStartTime(), containerReport.getFinishTime(),
containerReport.getCreationTime(), containerReport.getFinishTime(),
containerReport.getContainerState(), containerReport
.getAssignedNode(), containerReport.getLogUrl());
}

View File

@ -18,24 +18,19 @@
package org.apache.hadoop.yarn.api.records.impl.pb;
import org.apache.hadoop.security.proto.SecurityProtos.TokenProto;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerReport;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerReportProtoOrBuilder;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto;
import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto;
import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto;
import com.google.protobuf.TextFormat;
@ -150,9 +145,9 @@ public class ContainerReportPBImpl extends ContainerReport {
}
@Override
public long getStartTime() {
public long getCreationTime() {
ContainerReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getStartTime();
return p.getCreationTime();
}
@Override
@ -237,9 +232,9 @@ public class ContainerReportPBImpl extends ContainerReport {
}
@Override
public void setStartTime(long startTime) {
public void setCreationTime(long creationTime) {
maybeInitBuilder();
builder.setStartTime(startTime);
builder.setCreationTime(creationTime);
}
public ContainerReportProto getProto() {

View File

@ -34,6 +34,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
/**
@ -56,10 +57,12 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
private long expiryTimeStamp;
private int masterKeyId;
private long rmIdentifier;
private Priority priority;
private long creationTime;
public ContainerTokenIdentifier(ContainerId containerID, String hostName,
String appSubmitter, Resource r, long expiryTimeStamp, int masterKeyId,
long rmIdentifier) {
public ContainerTokenIdentifier(ContainerId containerID,
String hostName, String appSubmitter, Resource r, long expiryTimeStamp,
int masterKeyId, long rmIdentifier, Priority priority, long creationTime) {
this.containerId = containerID;
this.nmHostAddr = hostName;
this.appSubmitter = appSubmitter;
@ -67,6 +70,8 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
this.expiryTimeStamp = expiryTimeStamp;
this.masterKeyId = masterKeyId;
this.rmIdentifier = rmIdentifier;
this.priority = priority;
this.creationTime = creationTime;
}
/**
@ -99,6 +104,13 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
return this.masterKeyId;
}
public Priority getPriority() {
return this.priority;
}
public long getCreationTime() {
return this.creationTime;
}
/**
* Get the RMIdentifier of RM in which containers are allocated
* @return RMIdentifier
@ -124,6 +136,8 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
out.writeLong(this.expiryTimeStamp);
out.writeInt(this.masterKeyId);
out.writeLong(this.rmIdentifier);
out.writeInt(this.priority.getPriority());
out.writeLong(this.creationTime);
}
@Override
@ -142,6 +156,8 @@ public class ContainerTokenIdentifier extends TokenIdentifier {
this.expiryTimeStamp = in.readLong();
this.masterKeyId = in.readInt();
this.rmIdentifier = in.readLong();
this.priority = Priority.newInstance(in.readInt());
this.creationTime = in.readLong();
}
@Override
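The write()/readFields() change above is what lets a NodeManager carry the priority and creation time across the wire so a restarted ResourceManager can rebuild its container state. A minimal round-trip sketch (buffer classes come from org.apache.hadoop.io, the no-argument constructor is the one the token layer already uses for deserialization, and the values are illustrative):

import java.io.IOException;

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;

public class TokenIdentifierRoundTripSketch {
  public static void main(String[] args) throws IOException {
    ContainerId cId = ContainerId.newInstance(
        ApplicationAttemptId.newInstance(
            ApplicationId.newInstance(1234L, 1), 1), 1);
    ContainerTokenIdentifier original =
        new ContainerTokenIdentifier(cId, "localhost:1234", "user",
            Resource.newInstance(1024, 1), 0L, 0, 0L,
            Priority.newInstance(10), 1234L);

    // write() now appends the priority and creation time after the
    // rmIdentifier, and readFields() reads them back in the same order.
    DataOutputBuffer out = new DataOutputBuffer();
    original.write(out);

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    ContainerTokenIdentifier restored = new ContainerTokenIdentifier();
    restored.readFields(in);

    System.out.println(restored.getPriority());     // priority 10
    System.out.println(restored.getCreationTime()); // 1234
  }
}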

View File

@ -24,8 +24,6 @@ import java.net.SocketTimeoutException;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -46,6 +44,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -55,6 +54,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.HadoopYarnProtoRPC;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.junit.Assert;
import org.junit.Test;
/*
@ -102,7 +102,8 @@ public class TestContainerLaunchRPC {
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42);
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken =
TestRPC.newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);

View File

@ -23,8 +23,6 @@ import java.net.InetSocketAddress;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtobufRpcEngine;
@ -50,6 +48,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -61,6 +60,7 @@ import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.util.Records;
import org.junit.Assert;
import org.junit.Test;
public class TestRPC {
@ -129,7 +129,8 @@ public class TestRPC {
Resource resource = Resource.newInstance(1234, 2);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(containerId, "localhost", "user",
resource, System.currentTimeMillis() + 10000, 42, 42);
resource, System.currentTimeMillis() + 10000, 42, 42,
Priority.newInstance(0), 0);
Token containerToken = newContainerToken(nodeId, "password".getBytes(),
containerTokenIdentifier);

View File

@ -34,7 +34,8 @@ public abstract class NMContainerStatus {
public static NMContainerStatus newInstance(ContainerId containerId,
ContainerState containerState, Resource allocatedResource,
String diagnostics, int containerExitStatus) {
String diagnostics, int containerExitStatus, Priority priority,
long creationTime) {
NMContainerStatus status =
Records.newRecord(NMContainerStatus.class);
status.setContainerId(containerId);
@ -42,6 +43,8 @@ public abstract class NMContainerStatus {
status.setAllocatedResource(allocatedResource);
status.setDiagnostics(diagnostics);
status.setContainerExitStatus(containerExitStatus);
status.setPriority(priority);
status.setCreationTime(creationTime);
return status;
}
@ -95,4 +98,11 @@ public abstract class NMContainerStatus {
public abstract Priority getPriority();
public abstract void setPriority(Priority priority);
/**
* Get the time when the container is created
*/
public abstract long getCreationTime();
public abstract void setCreationTime(long creationTime);
}
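NMContainerStatus is the record each NodeManager sends when it re-registers with the ResourceManager, and it now includes the priority and creation time recovered from the container token. A minimal sketch of the extended factory method (class name and sample values are illustrative; the protobuf implementation lives in hadoop-yarn-server-common):

import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;

public class NMContainerStatusSketch {
  public static void main(String[] args) {
    ContainerId containerId = ContainerId.newInstance(
        ApplicationAttemptId.newInstance(
            ApplicationId.newInstance(123456789L, 1), 1), 1);

    // newInstance() now also takes the container's priority and creation time,
    // so a restarted RM can rebuild its RMContainer state from NM reports.
    NMContainerStatus status = NMContainerStatus.newInstance(containerId,
        ContainerState.RUNNING, Resource.newInstance(1024, 1), "diagnostics",
        ContainerExitStatus.SUCCESS, Priority.newInstance(10), 1234L);

    System.out.println(status.getPriority());     // priority 10
    System.out.println(status.getCreationTime()); // 1234
  }
}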

View File

@ -196,6 +196,18 @@ public class NMContainerStatusPBImpl extends NMContainerStatus {
this.priority = priority;
}
@Override
public long getCreationTime() {
NMContainerStatusProtoOrBuilder p = viaProto ? proto : builder;
return p.getCreationTime();
}
@Override
public void setCreationTime(long creationTime) {
maybeInitBuilder();
builder.setCreationTime(creationTime);
}
private void mergeLocalToBuilder() {
if (this.containerId != null
&& !((ContainerIdPBImpl) containerId).getProto().equals(

View File

@ -156,8 +156,7 @@ public class BuilderUtils {
byte[] password, long rmIdentifier) throws IOException {
ContainerTokenIdentifier identifier =
new ContainerTokenIdentifier(cId, host + ":" + port, user, r,
expiryTime,
masterKeyId, rmIdentifier);
expiryTime, masterKeyId, rmIdentifier, Priority.newInstance(0), 0);
return newContainerToken(BuilderUtils.newNodeId(host, port), password,
identifier);
}

View File

@ -57,7 +57,7 @@ public class ContainerInfo {
assignedNodeId = container.getAssignedNode().toString();
}
priority = container.getPriority().getPriority();
startedTime = container.getStartTime();
startedTime = container.getCreationTime();
finishedTime = container.getFinishTime();
elapsedTime = Times.elapsed(startedTime, finishedTime);
diagnosticsInfo = container.getDiagnosticsInfo();

View File

@ -67,4 +67,5 @@ message NMContainerStatusProto {
optional PriorityProto priority = 4;
optional string diagnostics = 5 [default = "N/A"];
optional int32 container_exit_status = 6;
}
optional int64 creation_time = 7;
}

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.api.protocolrecords.RegisterNodeManagerRequest;
@ -38,7 +39,7 @@ import org.junit.Test;
public class TestProtocolRecords {
@Test
public void testContainerRecoveryReport() {
public void testNMContainerStatus() {
ApplicationId appId = ApplicationId.newInstance(123456789, 1);
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId, 1);
ContainerId containerId = ContainerId.newInstance(attemptId, 1);
@ -47,7 +48,7 @@ public class TestProtocolRecords {
NMContainerStatus report =
NMContainerStatus.newInstance(containerId,
ContainerState.COMPLETE, resource, "diagnostics",
ContainerExitStatus.ABORTED);
ContainerExitStatus.ABORTED, Priority.newInstance(10), 1234);
NMContainerStatus reportProto =
new NMContainerStatusPBImpl(
((NMContainerStatusPBImpl) report).getProto());
@ -58,15 +59,8 @@ public class TestProtocolRecords {
Assert.assertEquals(ContainerState.COMPLETE,
reportProto.getContainerState());
Assert.assertEquals(containerId, reportProto.getContainerId());
}
public static NMContainerStatus createContainerRecoveryReport(
ApplicationAttemptId appAttemptId, int id, ContainerState containerState) {
ContainerId containerId = ContainerId.newInstance(appAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, containerState,
Resource.newInstance(1024, 1), "diagnostics", 0);
return containerReport;
Assert.assertEquals(Priority.newInstance(10), reportProto.getPriority());
Assert.assertEquals(1234, reportProto.getCreationTime());
}
@Test
@ -78,7 +72,7 @@ public class TestProtocolRecords {
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId,
ContainerState.RUNNING, Resource.newInstance(1024, 1), "diagnostics",
0);
0, Priority.newInstance(10), 1234);
List<NMContainerStatus> reports = Arrays.asList(containerReport);
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(

View File

@ -24,8 +24,8 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.server.api.protocolrecords.impl.pb.RegisterNodeManagerRequestPBImpl;
import org.junit.Assert;
@ -36,14 +36,15 @@ public class TestRegisterNodeManagerRequest {
public void testRegisterNodeManagerRequest() {
RegisterNodeManagerRequest request =
RegisterNodeManagerRequest.newInstance(
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", Arrays.asList(NMContainerStatus.newInstance(
ContainerId.newInstance(
ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234L, 1), 1), 1),
ContainerState.RUNNING, Resource.newInstance(1024, 1), "good",
-1)), Arrays.asList(ApplicationId.newInstance(1234L, 1),
ApplicationId.newInstance(1234L, 2)));
NodeId.newInstance("host", 1234), 1234, Resource.newInstance(0, 0),
"version", Arrays.asList(NMContainerStatus.newInstance(
ContainerId.newInstance(
ApplicationAttemptId.newInstance(
ApplicationId.newInstance(1234L, 1), 1), 1),
ContainerState.RUNNING, Resource.newInstance(1024, 1), "good", -1,
Priority.newInstance(0), 1234)), Arrays.asList(
ApplicationId.newInstance(1234L, 1),
ApplicationId.newInstance(1234L, 2)));
// serialze to proto, and get request from proto
RegisterNodeManagerRequest request1 =

View File

@ -391,8 +391,10 @@ public class ContainerImpl implements Container {
public NMContainerStatus getNMContainerStatus() {
this.readLock.lock();
try {
return NMContainerStatus.newInstance(this.containerId,
getCurrentState(), getResource(), diagnostics.toString(), exitCode);
return NMContainerStatus.newInstance(this.containerId, getCurrentState(),
getResource(), diagnostics.toString(), exitCode,
containerTokenIdentifier.getPriority(),
containerTokenIdentifier.getCreationTime());
} finally {
this.readLock.unlock();
}
@ -935,5 +937,4 @@ public class ContainerImpl implements Container {
private boolean hasDefaultExitCode() {
return (this.exitCode == ContainerExitStatus.INVALID);
}
}

View File

@ -41,6 +41,7 @@ import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
@ -529,7 +530,8 @@ public class TestNodeManagerResync {
ContainerId containerId = ContainerId.newInstance(applicationAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, containerState,
Resource.newInstance(1024, 1), "recover container", 0);
Resource.newInstance(1024, 1), "recover container", 0,
Priority.newInstance(10), 0);
return containerReport;
}
}

View File

@ -33,8 +33,6 @@ import java.util.ArrayList;
import java.util.Collection;
import java.util.Map;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -46,6 +44,7 @@ import org.apache.hadoop.service.Service;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
@ -54,10 +53,9 @@ import org.apache.hadoop.yarn.server.api.ApplicationTerminationContext;
import org.apache.hadoop.yarn.server.api.AuxiliaryService;
import org.apache.hadoop.yarn.server.api.ContainerInitializationContext;
import org.apache.hadoop.yarn.server.api.ContainerTerminationContext;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container
.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container
.ContainerImpl;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerImpl;
import org.junit.Assert;
import org.junit.Test;
public class TestAuxServices {
@ -192,7 +190,7 @@ public class TestAuxServices {
ApplicationAttemptId attemptId = ApplicationAttemptId.newInstance(appId1, 1);
ContainerTokenIdentifier cti = new ContainerTokenIdentifier(
ContainerId.newInstance(attemptId, 1), "", "",
Resource.newInstance(1, 1), 0,0,0);
Resource.newInstance(1, 1), 0,0,0, Priority.newInstance(0), 0);
Container container = new ContainerImpl(null, null, null, null, null, cti);
ContainerId containerId = container.getContainerId();
Resource resource = container.getResource();

View File

@ -31,9 +31,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.junit.Assert;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
@ -50,6 +47,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
@ -58,6 +56,7 @@ import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.Token;
@ -80,6 +79,7 @@ import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecret
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -798,7 +798,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
Resource r = BuilderUtils.newResource(1024, 1);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(cId, nodeId.toString(), user, r,
System.currentTimeMillis() + 100000L, 123, rmIdentifier);
System.currentTimeMillis() + 100000L, 123, rmIdentifier,
Priority.newInstance(0), 0);
Token containerToken =
BuilderUtils
.newContainerToken(nodeId, containerTokenSecretManager

View File

@ -32,14 +32,13 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.event.DrainDispatcher;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
@ -66,6 +65,7 @@ import org.apache.hadoop.yarn.server.nodemanager.security.NMContainerTokenSecret
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.security.ApplicationACLsManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
@ -538,7 +538,8 @@ public class TestApplication {
long currentTime = System.currentTimeMillis();
ContainerTokenIdentifier identifier =
new ContainerTokenIdentifier(container.getContainerId(), "", "",
null, currentTime + 2000, masterKey.getKeyId(), currentTime);
null, currentTime + 2000, masterKey.getKeyId(), currentTime,
Priority.newInstance(0), 0);
containerTokenIdentifierMap
.put(identifier.getContainerID(), identifier);
context.getContainerTokenSecretManager().startContainerSuccessful(

View File

@ -17,7 +17,6 @@
*/
package org.apache.hadoop.yarn.server.nodemanager.containermanager.container;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertNotNull;
import static org.junit.Assert.assertNull;
@ -49,17 +48,17 @@ import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Future;
import org.junit.Assert;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
@ -90,6 +89,7 @@ import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.Contai
import org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor.ContainersMonitorEventType;
import org.apache.hadoop.yarn.server.nodemanager.metrics.NodeManagerMetrics;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.junit.Assert;
import org.junit.Test;
import org.mockito.ArgumentMatcher;
@ -750,7 +750,7 @@ public class TestContainer {
long currentTime = System.currentTimeMillis();
ContainerTokenIdentifier identifier =
new ContainerTokenIdentifier(cId, "127.0.0.1", user, resource,
currentTime + 10000L, 123, currentTime);
currentTime + 10000L, 123, currentTime, Priority.newInstance(0), 0);
Token token =
BuilderUtils.newContainerToken(BuilderUtils.newNodeId(host, port),
"password".getBytes(), identifier);

View File

@ -18,11 +18,10 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.launcher;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertThat;
import static org.junit.Assert.fail;
import static org.junit.matchers.JUnitMatchers.*;
import static org.junit.matchers.JUnitMatchers.containsString;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
@ -41,7 +40,6 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.junit.Assert;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
@ -59,6 +57,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StopContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
@ -66,6 +65,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
@ -74,6 +74,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.event.Event;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.protocolrecords.NMContainerStatus;
import org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.BaseContainerManagerTest;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
@ -87,6 +88,7 @@ import org.apache.hadoop.yarn.util.AuxiliaryServiceHelper;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.junit.Assert;
import org.junit.Assume;
import org.junit.Before;
import org.junit.Test;
@ -480,7 +482,7 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
containerLaunchContext.setCommands(commands);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
createContainerToken(cId));
createContainerToken(cId, Priority.newInstance(0), 0));
List<StartContainerRequest> list = new ArrayList<StartContainerRequest>();
list.add(scRequest);
StartContainersRequest allRequests =
@ -679,7 +681,9 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
// set up the rest of the container
List<String> commands = Arrays.asList(Shell.getRunScriptCommand(scriptFile));
containerLaunchContext.setCommands(commands);
Token containerToken = createContainerToken(cId);
Priority priority = Priority.newInstance(10);
long createTime = 1234;
Token containerToken = createContainerToken(cId, priority, createTime);
StartContainerRequest scRequest =
StartContainerRequest.newInstance(containerLaunchContext,
@ -698,6 +702,11 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
Assert.assertTrue("ProcessStartFile doesn't exist!",
processStartFile.exists());
NMContainerStatus nmContainerStatus =
containerManager.getContext().getContainers().get(cId)
.getNMContainerStatus();
Assert.assertEquals(priority, nmContainerStatus.getPriority());
// Now test the stop functionality.
List<ContainerId> containerIds = new ArrayList<ContainerId>();
containerIds.add(cId);
@ -783,11 +792,13 @@ public class TestContainerLaunch extends BaseContainerManagerTest {
launch.call();
}
protected Token createContainerToken(ContainerId cId) throws InvalidToken {
protected Token createContainerToken(ContainerId cId, Priority priority,
long createTime) throws InvalidToken {
Resource r = BuilderUtils.newResource(1024, 1);
ContainerTokenIdentifier containerTokenIdentifier =
new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user,
r, System.currentTimeMillis() + 10000L, 123, DUMMY_RM_IDENTIFIER);
r, System.currentTimeMillis() + 10000L, 123, DUMMY_RM_IDENTIFIER,
priority, createTime);
Token containerToken =
BuilderUtils.newContainerToken(
context.getNodeId(),

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.yarn.server.nodemanager.containermanager.monitor;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@ -35,8 +34,6 @@ import java.util.List;
import java.util.Map;
import java.util.regex.Pattern;
import org.junit.Assert;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
@ -46,6 +43,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
import org.apache.hadoop.yarn.api.protocolrecords.StartContainersRequest;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerExitStatus;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerState;
@ -53,6 +51,7 @@ import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.api.records.URL;
@ -70,6 +69,7 @@ import org.apache.hadoop.yarn.util.LinuxResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.ProcfsBasedProcessTree;
import org.apache.hadoop.yarn.util.ResourceCalculatorPlugin;
import org.apache.hadoop.yarn.util.TestProcfsBasedProcessTree;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -230,7 +230,8 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
Resource r = BuilderUtils.newResource(8 * 1024 * 1024, 1);
ContainerTokenIdentifier containerIdentifier =
new ContainerTokenIdentifier(cId, context.getNodeId().toString(), user,
r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER);
r, System.currentTimeMillis() + 120000, 123, DUMMY_RM_IDENTIFIER,
Priority.newInstance(0), 0);
Token containerToken =
BuilderUtils.newContainerToken(context.getNodeId(),
containerManager.getContext().getContainerTokenSecretManager()

View File

@ -269,7 +269,7 @@ public class RMApplicationHistoryWriter extends CompositeService {
new WritingContainerStartEvent(container.getContainerId(),
ContainerStartData.newInstance(container.getContainerId(),
container.getAllocatedResource(), container.getAllocatedNode(),
container.getAllocatedPriority(), container.getStartTime())));
container.getAllocatedPriority(), container.getCreationTime())));
}
}

View File

@ -58,7 +58,7 @@ public interface RMContainer extends EventHandler<RMContainerEvent> {
Priority getAllocatedPriority();
long getStartTime();
long getCreationTime();
long getFinishTime();

View File

@ -152,23 +152,27 @@ public class RMContainerImpl implements RMContainer {
private Resource reservedResource;
private NodeId reservedNode;
private Priority reservedPriority;
private long startTime;
private long creationTime;
private long finishTime;
private ContainerStatus finishedStatus;
public RMContainerImpl(Container container,
ApplicationAttemptId appAttemptId, NodeId nodeId, String user,
RMContext rmContext) {
this(container, appAttemptId, nodeId, user, rmContext, System
.currentTimeMillis());
}
public RMContainerImpl(Container container,
ApplicationAttemptId appAttemptId, NodeId nodeId,
String user, RMContext rmContext) {
String user, RMContext rmContext, long creationTime) {
this.stateMachine = stateMachineFactory.make(this);
this.containerId = container.getId();
this.nodeId = nodeId;
this.container = container;
this.appAttemptId = appAttemptId;
this.user = user;
this.startTime = System.currentTimeMillis();
this.creationTime = creationTime;
this.rmContext = rmContext;
this.eventHandler = rmContext.getDispatcher().getEventHandler();
this.containerAllocationExpirer = rmContext.getContainerAllocationExpirer();
@ -237,8 +241,8 @@ public class RMContainerImpl implements RMContainer {
}
@Override
public long getStartTime() {
return startTime;
public long getCreationTime() {
return creationTime;
}
@Override
@ -478,7 +482,7 @@ public class RMContainerImpl implements RMContainer {
try {
containerReport = ContainerReport.newInstance(this.getContainerId(),
this.getAllocatedResource(), this.getAllocatedNode(),
this.getAllocatedPriority(), this.getStartTime(),
this.getAllocatedPriority(), this.getCreationTime(),
this.getFinishTime(), this.getDiagnosticsInfo(), this.getLogURL(),
this.getContainerExitStatus(), this.getContainerState());
} finally {

View File

@ -245,17 +245,18 @@ public abstract class AbstractYarnScheduler
}
}
private RMContainer recoverAndCreateContainer(NMContainerStatus report,
private RMContainer recoverAndCreateContainer(NMContainerStatus status,
RMNode node) {
Container container =
Container.newInstance(report.getContainerId(), node.getNodeID(),
node.getHttpAddress(), report.getAllocatedResource(),
report.getPriority(), null);
Container.newInstance(status.getContainerId(), node.getNodeID(),
node.getHttpAddress(), status.getAllocatedResource(),
status.getPriority(), null);
ApplicationAttemptId attemptId =
container.getId().getApplicationAttemptId();
RMContainer rmContainer =
new RMContainerImpl(container, attemptId, node.getNodeID(),
applications.get(attemptId.getApplicationId()).getUser(), rmContext);
applications.get(attemptId.getApplicationId()).getUser(), rmContext,
status.getCreationTime());
return rmContainer;
}

View File

@ -414,7 +414,8 @@ public class SchedulerApplicationAttempt {
// create container token and NMToken altogether.
container.setContainerToken(rmContext.getContainerTokenSecretManager()
.createContainerToken(container.getId(), container.getNodeId(),
getUser(), container.getResource()));
getUser(), container.getResource(), container.getPriority(),
rmContainer.getCreationTime()));
NMToken nmToken =
rmContext.getNMTokenSecretManager().createAndGetNMToken(getUser(),
getApplicationAttemptId(), container);

View File

@ -27,6 +27,7 @@ import org.apache.hadoop.classification.InterfaceAudience.Private;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.Token;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@ -34,8 +35,8 @@ import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.api.records.MasterKey;
import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager;
import org.apache.hadoop.yarn.server.security.BaseContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.server.security.MasterKeyData;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
/**
* SecretManager for ContainerTokens. This is RM-specific and rolls the
@ -169,11 +170,13 @@ public class RMContainerTokenSecretManager extends
* @param nodeId
* @param appSubmitter
* @param capability
* @param priority
* @param createTime
* @return the container-token
*/
public Token
createContainerToken(ContainerId containerId, NodeId nodeId,
String appSubmitter, Resource capability) {
public Token createContainerToken(ContainerId containerId, NodeId nodeId,
String appSubmitter, Resource capability, Priority priority,
long createTime) {
byte[] password;
ContainerTokenIdentifier tokenIdentifier;
long expiryTimeStamp =
@ -185,7 +188,8 @@ public class RMContainerTokenSecretManager extends
tokenIdentifier =
new ContainerTokenIdentifier(containerId, nodeId.toString(),
appSubmitter, capability, expiryTimeStamp, this.currentMasterKey
.getMasterKey().getKeyId(), ResourceManager.getClusterTimeStamp());
.getMasterKey().getKeyId(),
ResourceManager.getClusterTimeStamp(), priority, createTime);
password = this.createPassword(tokenIdentifier);
} finally {

View File

@ -50,7 +50,6 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt;
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEvent;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.http.client.params.AllClientPNames;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;
@ -382,9 +381,9 @@ public class TestApplicationCleanup {
// nm1/nm2 register to rm2, and do a heartbeat
nm1.setResourceTrackerService(rm2.getResourceTrackerService());
nm1.registerNode(Arrays.asList(NMContainerStatus.newInstance(
ContainerId.newInstance(am0.getApplicationAttemptId(), 1),
ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0)), Arrays
.asList(app0.getApplicationId()));
ContainerId.newInstance(am0.getApplicationAttemptId(), 1),
ContainerState.COMPLETE, Resource.newInstance(1024, 1), "", 0,
Priority.newInstance(0), 1234)), Arrays.asList(app0.getApplicationId()));
nm2.setResourceTrackerService(rm2.getResourceTrackerService());
nm2.registerNode(Arrays.asList(app0.getApplicationId()));

View File

@ -69,6 +69,7 @@ import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
@ -1851,7 +1852,8 @@ public class TestRMRestart {
ContainerId containerId = ContainerId.newInstance(appAttemptId, id);
NMContainerStatus containerReport =
NMContainerStatus.newInstance(containerId, containerState,
Resource.newInstance(1024, 1), "recover container", 0);
Resource.newInstance(1024, 1), "recover container", 0,
Priority.newInstance(0), 0);
return containerReport;
}

View File

@ -18,11 +18,14 @@
package org.apache.hadoop.yarn.server.resourcemanager;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.List;
@ -33,12 +36,12 @@ import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerState;
import org.apache.hadoop.yarn.api.records.ContainerStatus;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.NodeState;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
@ -57,17 +60,10 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event.SchedulerEv
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
import org.apache.hadoop.yarn.util.YarnVersionInfo;
import org.junit.After;
import org.junit.Assert;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
import static org.mockito.Matchers.any;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.spy;
import static org.mockito.Mockito.verify;
public class TestResourceTrackerService {
private final static File TEMP_DIR = new File(System.getProperty(
@ -493,7 +489,7 @@ public class TestResourceTrackerService {
ContainerId.newInstance(
ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0);
"Dummy Completed", 0, Priority.newInstance(10), 1234);
rm.getResourceTrackerService().handleNMContainerStatus(report);
verify(handler, never()).handle((Event) any());
@ -504,7 +500,7 @@ public class TestResourceTrackerService {
report = NMContainerStatus.newInstance(
ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0);
"Dummy Completed", 0, Priority.newInstance(10), 1234);
rm.getResourceTrackerService().handleNMContainerStatus(report);
verify(handler, never()).handle((Event)any());
@ -516,7 +512,7 @@ public class TestResourceTrackerService {
ContainerId.newInstance(
ApplicationAttemptId.newInstance(app.getApplicationId(), 2), 1),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0);
"Dummy Completed", 0, Priority.newInstance(10), 1234);
try {
rm.getResourceTrackerService().handleNMContainerStatus(report);
} catch (Exception e) {
@ -531,7 +527,7 @@ public class TestResourceTrackerService {
report = NMContainerStatus.newInstance(
ContainerId.newInstance(currentAttempt.getAppAttemptId(), 0),
ContainerState.COMPLETE, Resource.newInstance(1024, 1),
"Dummy Completed", 0);
"Dummy Completed", 0, Priority.newInstance(10), 1234);
try {
rm.getResourceTrackerService().handleNMContainerStatus(report);
} catch (Exception e) {

View File

@ -165,7 +165,7 @@ public class TestRMApplicationHistoryWriter {
when(container.getAllocatedResource()).thenReturn(
Resource.newInstance(-1, -1));
when(container.getAllocatedPriority()).thenReturn(Priority.UNDEFINED);
when(container.getStartTime()).thenReturn(0L);
when(container.getCreationTime()).thenReturn(0L);
when(container.getFinishTime()).thenReturn(1L);
when(container.getDiagnosticsInfo()).thenReturn("test diagnostics info");
when(container.getLogURL()).thenReturn("test log url");
@ -281,7 +281,7 @@ public class TestRMApplicationHistoryWriter {
Assert.assertEquals(Resource.newInstance(-1, -1),
containerHD.getAllocatedResource());
Assert.assertEquals(Priority.UNDEFINED, containerHD.getPriority());
Assert.assertEquals(0L, container.getStartTime());
Assert.assertEquals(0L, container.getCreationTime());
writer.containerFinished(container);
for (int i = 0; i < MAX_RETRIES; ++i) {

View File

@ -21,8 +21,6 @@ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity;
import java.util.ArrayList;
import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -31,6 +29,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceRequest;
import org.apache.hadoop.yarn.api.records.Token;
@ -48,6 +47,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer;
import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState;
import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
@ -209,10 +209,11 @@ public class TestContainerAllocation {
@Override
public Token createContainerToken(ContainerId containerId,
NodeId nodeId, String appSubmitter, Resource capability) {
NodeId nodeId, String appSubmitter, Resource capability,
Priority priority, long createTime) {
numRetries++;
return super.createContainerToken(containerId, nodeId, appSubmitter,
capability);
capability, priority, createTime);
}
};
}

View File

@ -29,8 +29,6 @@ import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.junit.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@ -52,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Priority;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.SerializedException;
import org.apache.hadoop.yarn.api.records.Token;
@ -60,15 +59,18 @@ import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.server.nodemanager.Context;
import org.apache.hadoop.yarn.server.nodemanager.NodeManager;
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
import org.apache.hadoop.yarn.server.nodemanager.security.NMTokenSecretManagerInNM;
import org.apache.hadoop.yarn.server.resourcemanager.security.NMTokenSecretManagerInRM;
import org.apache.hadoop.yarn.server.resourcemanager.security.RMContainerTokenSecretManager;
import org.apache.hadoop.yarn.server.utils.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.Records;
import org.junit.After;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
@ -215,7 +217,11 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
org.apache.hadoop.yarn.api.records.Token validContainerToken =
containerTokenSecretManager.createContainerToken(validContainerId,
validNode, user, r);
validNode, user, r, Priority.newInstance(10), 1234);
ContainerTokenIdentifier identifier =
BuilderUtils.newContainerTokenIdentifier(validContainerToken);
Assert.assertEquals(Priority.newInstance(10), identifier.getPriority());
Assert.assertEquals(1234, identifier.getCreationTime());
StringBuilder sb;
// testInvalidNMToken ... creating NMToken using different secret manager.
@ -280,8 +286,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
4 * 60 * 1000);
validContainerToken =
containerTokenSecretManager.createContainerToken(validContainerId,
validNode, user, r);
validNode, user, r, Priority.newInstance(0), 0);
testStartContainer(rpc, validAppAttemptId, validNode, validContainerToken,
validNMToken, false);
Assert.assertTrue(nmTokenSecretManagerNM
@ -590,7 +595,7 @@ public class TestContainerManagerSecurity extends KerberosSecurityTestcase {
// Creating modified containerToken
Token containerToken =
tamperedContainerTokenSecretManager.createContainerToken(cId, nodeId,
user, r);
user, r, Priority.newInstance(0), 0);
Token nmToken =
nmTokenSecretManagerInRM.createNMToken(appAttemptId, nodeId, user);
YarnRPC rpc = YarnRPC.create(conf);