YARN-629. Make YarnRemoteException not be rooted at IOException. Contributed by Xuan Gong.
MAPREDUCE-5204. Handling YarnRemoteException separately from IOException in MR app after YARN-629. Contributed by Xuan Gong.

svn merge --ignore-ancestry -c 1479680 ../../trunk/

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1479684 13f79535-47bb-0310-9956-ffa450edef68
parent 8b755237f8
commit 486391f766
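The diff below repeats one pattern across the MR client code: now that YarnRemoteException is no longer rooted at IOException (YARN-629), call sites that must keep throwing IOException catch it and wrap it. A minimal sketch of that pattern, not taken from the patch — the RemoteService interface and its renew() method are hypothetical names used only for illustration:

import java.io.IOException;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

public class WrapPatternSketch {
  // Hypothetical remote interface standing in for any YARN RPC proxy.
  interface RemoteService {
    long renew() throws YarnRemoteException;
  }

  // Callers that are contractually limited to IOException wrap the
  // YarnRemoteException so the remote failure is preserved as the cause.
  long renewAsIOException(RemoteService service) throws IOException {
    try {
      return service.renew();
    } catch (YarnRemoteException e) {
      throw new IOException(e);
    }
  }
}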
@@ -215,6 +215,9 @@ Release 2.0.5-beta - UNRELEASED

    MAPREDUCE-5205. Fixed MR App to load tokens correctly. (vinodkv)

    MAPREDUCE-5204. Handling YarnRemoteException separately from IOException in
    MR app after YARN-629. (Xuan Gong via vinodkv)

Release 2.0.4-alpha - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -37,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.yarn.api.records.DelegationToken;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;

@@ -67,6 +68,8 @@ public class MRDelegationTokenRenewer extends TokenRenewer {
          .newRecord(RenewDelegationTokenRequest.class);
      request.setDelegationToken(dToken);
      return histProxy.renewDelegationToken(request).getNextExpirationTime();
    } catch (YarnRemoteException e) {
      throw new IOException(e);
    } finally {
      stopHistoryProxy(histProxy);
    }

@@ -88,6 +91,8 @@ public class MRDelegationTokenRenewer extends TokenRenewer {
          .newRecord(CancelDelegationTokenRequest.class);
      request.setDelegationToken(dToken);
      histProxy.cancelDelegationToken(request);
    } catch (YarnRemoteException e) {
      throw new IOException(e);
    } finally {
      stopHistoryProxy(histProxy);
    }
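The two MRDelegationTokenRenewer hunks above combine the wrap with proxy cleanup in a finally block. A self-contained sketch of that shape, under the assumption that stopProxy() stands in for whatever shutdown the real history proxy needs (the HistoryClient type and its methods are invented for illustration):

import java.io.IOException;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

public class RenewWithCleanupSketch {
  // Invented stand-in for the MR history-server proxy used by the renewer.
  interface HistoryClient {
    long renewDelegationToken() throws YarnRemoteException;
    void stopProxy();
  }

  long renew(HistoryClient proxy) throws IOException {
    try {
      return proxy.renewDelegationToken();
    } catch (YarnRemoteException e) {
      throw new IOException(e);   // remote failure surfaces as IOException
    } finally {
      proxy.stopProxy();          // proxy is always shut down
    }
  }
}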
@@ -302,13 +302,13 @@ public class ClientServiceDelegate {
        return methodOb.invoke(getProxy(), args);
      } catch (YarnRemoteException yre) {
        LOG.warn("Exception thrown by remote end.", yre);
        throw yre;
        throw new IOException(yre);
      } catch (InvocationTargetException e) {
        if (e.getTargetException() instanceof YarnRemoteException) {
          LOG.warn("Error from remote end: " + e
              .getTargetException().getLocalizedMessage());
          LOG.debug("Tracing remote error ", e.getTargetException());
          throw (YarnRemoteException) e.getTargetException();
          throw new IOException(e.getTargetException());
        }
        LOG.debug("Failed to contact AM/History for job " + jobId +
            " retrying..", e.getTargetException());
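ClientServiceDelegate calls the MR client protocol reflectively, so remote failures arrive wrapped in InvocationTargetException; the hunk above rewraps the interesting target exception as IOException instead of rethrowing it. A compact sketch of that unwrapping, with ReflectiveInvokeSketch, target and the IOException fallbacks as illustrative choices only, not the patch's exact behavior:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

public class ReflectiveInvokeSketch {
  Object invoke(Method method, Object target, Object... args) throws IOException {
    try {
      return method.invoke(target, args);
    } catch (InvocationTargetException e) {
      // The exception actually thrown by the proxy is the target exception.
      if (e.getTargetException() instanceof YarnRemoteException) {
        throw new IOException(e.getTargetException());
      }
      // Sketch only: other target exceptions also surface as IOException here.
      throw new IOException(e);
    } catch (IllegalAccessException e) {
      throw new IOException(e);
    }
  }
}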
@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
|
|||
import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
|
||||
import org.apache.hadoop.yarn.client.YarnClientImpl;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.util.ProtoUtils;
|
||||
|
||||
public class ResourceMgrDelegate extends YarnClientImpl {
|
||||
|
@ -65,11 +66,19 @@ public class ResourceMgrDelegate extends YarnClientImpl {
|
|||
|
||||
public TaskTrackerInfo[] getActiveTrackers() throws IOException,
|
||||
InterruptedException {
|
||||
try {
|
||||
return TypeConverter.fromYarnNodes(super.getNodeReports());
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public JobStatus[] getAllJobs() throws IOException, InterruptedException {
|
||||
try {
|
||||
return TypeConverter.fromYarnApps(super.getApplicationList(), this.conf);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
|
||||
|
@ -81,11 +90,17 @@ public class ResourceMgrDelegate extends YarnClientImpl {
|
|||
|
||||
public ClusterMetrics getClusterMetrics() throws IOException,
|
||||
InterruptedException {
|
||||
try {
|
||||
YarnClusterMetrics metrics = super.getYarnClusterMetrics();
|
||||
ClusterMetrics oldMetrics = new ClusterMetrics(1, 1, 1, 1, 1, 1,
|
||||
metrics.getNumNodeManagers() * 10, metrics.getNumNodeManagers() * 2, 1,
|
||||
ClusterMetrics oldMetrics =
|
||||
new ClusterMetrics(1, 1, 1, 1, 1, 1,
|
||||
metrics.getNumNodeManagers() * 10,
|
||||
metrics.getNumNodeManagers() * 2, 1,
|
||||
metrics.getNumNodeManagers(), 0, 0);
|
||||
return oldMetrics;
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
InetSocketAddress getConnectAddress() {
|
||||
|
@ -95,8 +110,12 @@ public class ResourceMgrDelegate extends YarnClientImpl {
|
|||
@SuppressWarnings("rawtypes")
|
||||
public Token getDelegationToken(Text renewer) throws IOException,
|
||||
InterruptedException {
|
||||
try {
|
||||
return ProtoUtils.convertFromProtoFormat(
|
||||
super.getRMDelegationToken(renewer), rmAddress);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public String getFilesystemName() throws IOException, InterruptedException {
|
||||
|
@ -104,36 +123,62 @@ public class ResourceMgrDelegate extends YarnClientImpl {
|
|||
}
|
||||
|
||||
public JobID getNewJobID() throws IOException, InterruptedException {
|
||||
try {
|
||||
this.application = super.getNewApplication();
|
||||
this.applicationId = this.application.getApplicationId();
|
||||
return TypeConverter.fromYarn(applicationId);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public QueueInfo getQueue(String queueName) throws IOException,
|
||||
InterruptedException {
|
||||
try {
|
||||
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
|
||||
super.getQueueInfo(queueName);
|
||||
return (queueInfo == null) ? null : TypeConverter.fromYarn(queueInfo, conf);
|
||||
return (queueInfo == null) ? null : TypeConverter.fromYarn(queueInfo,
|
||||
conf);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
|
||||
InterruptedException {
|
||||
try {
|
||||
return TypeConverter.fromYarnQueueUserAclsInfo(super
|
||||
.getQueueAclsInfo());
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public QueueInfo[] getQueues() throws IOException, InterruptedException {
|
||||
try {
|
||||
return TypeConverter.fromYarnQueueInfo(super.getAllQueues(), this.conf);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
|
||||
return TypeConverter.fromYarnQueueInfo(super.getRootQueueInfos(), this.conf);
|
||||
try {
|
||||
return TypeConverter.fromYarnQueueInfo(super.getRootQueueInfos(),
|
||||
this.conf);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public QueueInfo[] getChildQueues(String parent) throws IOException,
|
||||
InterruptedException {
|
||||
try {
|
||||
return TypeConverter.fromYarnQueueInfo(super.getChildQueueInfos(parent),
|
||||
this.conf);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
public String getStagingAreaDir() throws IOException, InterruptedException {
|
||||
|
|
|
@ -80,6 +80,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
|
|||
import org.apache.hadoop.yarn.api.records.URL;
|
||||
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.security.client.RMTokenSelector;
|
||||
|
@ -207,10 +208,15 @@ public class YARNRunner implements ClientProtocol {
|
|||
GetDelegationTokenRequest request = recordFactory
|
||||
.newRecordInstance(GetDelegationTokenRequest.class);
|
||||
request.setRenewer(Master.getMasterPrincipal(conf));
|
||||
DelegationToken mrDelegationToken = hsProxy.getDelegationToken(request)
|
||||
DelegationToken mrDelegationToken;
|
||||
try {
|
||||
mrDelegationToken = hsProxy.getDelegationToken(request)
|
||||
.getDelegationToken();
|
||||
return ProtoUtils.convertFromProtoFormat(mrDelegationToken,
|
||||
hsProxy.getConnectAddress());
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -295,19 +301,25 @@ public class YARNRunner implements ClientProtocol {
|
|||
createApplicationSubmissionContext(conf, jobSubmitDir, ts);
|
||||
|
||||
// Submit to ResourceManager
|
||||
ApplicationId applicationId = resMgrDelegate.submitApplication(appContext);
|
||||
try {
|
||||
ApplicationId applicationId =
|
||||
resMgrDelegate.submitApplication(appContext);
|
||||
|
||||
ApplicationReport appMaster = resMgrDelegate
|
||||
.getApplicationReport(applicationId);
|
||||
String diagnostics =
|
||||
(appMaster == null ?
|
||||
"application report is null" : appMaster.getDiagnostics());
|
||||
if (appMaster == null || appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
|
||||
if (appMaster == null
|
||||
|| appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
|
||||
|| appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
|
||||
throw new IOException("Failed to run job : " +
|
||||
diagnostics);
|
||||
}
|
||||
return clientCache.getClient(jobId).getJobStatus(jobId);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private LocalResource createApplicationResource(FileContext fs, Path p, LocalResourceType type)
|
||||
|
@ -552,7 +564,11 @@ public class YARNRunner implements ClientProtocol {
|
|||
/* check if the status is not running, if not send kill to RM */
|
||||
JobStatus status = clientCache.getClient(arg0).getJobStatus(arg0);
|
||||
if (status.getState() != JobStatus.State.RUNNING) {
|
||||
try {
|
||||
resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -576,7 +592,11 @@ public class YARNRunner implements ClientProtocol {
|
|||
LOG.debug("Error when checking for application status", io);
|
||||
}
|
||||
if (status.getState() != JobStatus.State.KILLED) {
|
||||
try {
|
||||
resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -607,7 +627,11 @@ public class YARNRunner implements ClientProtocol {
|
|||
@Override
|
||||
public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
|
||||
throws IOException {
|
||||
try {
|
||||
return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
|
||||
private static void warnForJavaLibPath(String opts, String component,
|
||||
|
|
|
@ -115,8 +115,9 @@ public class TestClientServiceDelegate {
|
|||
try {
|
||||
clientServiceDelegate.getJobStatus(oldJobId);
|
||||
Assert.fail("Invoke should throw exception after retries.");
|
||||
} catch (YarnRemoteException e) {
|
||||
Assert.assertEquals("Job ID doesnot Exist", e.getMessage());
|
||||
} catch (IOException e) {
|
||||
Assert.assertTrue(e.getMessage().contains(
|
||||
"Job ID doesnot Exist"));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -198,7 +199,8 @@ public class TestClientServiceDelegate {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testReconnectOnAMRestart() throws IOException {
|
||||
public void testReconnectOnAMRestart() throws IOException,
|
||||
YarnRemoteException {
|
||||
//test not applicable when AM not reachable
|
||||
//as instantiateAMProxy is not called at all
|
||||
if(!isAMReachableFromClient) {
|
||||
|
@ -265,7 +267,7 @@ public class TestClientServiceDelegate {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testAMAccessDisabled() throws IOException {
|
||||
public void testAMAccessDisabled() throws IOException, YarnRemoteException {
|
||||
//test only applicable when AM not reachable
|
||||
if(isAMReachableFromClient) {
|
||||
return;
|
||||
|
@ -317,7 +319,8 @@ public class TestClientServiceDelegate {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testRMDownForJobStatusBeforeGetAMReport() throws IOException {
|
||||
public void testRMDownForJobStatusBeforeGetAMReport() throws IOException,
|
||||
YarnRemoteException {
|
||||
Configuration conf = new YarnConfiguration();
|
||||
testRMDownForJobStatusBeforeGetAMReport(conf,
|
||||
MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES);
|
||||
|
@ -325,7 +328,7 @@ public class TestClientServiceDelegate {
|
|||
|
||||
@Test
|
||||
public void testRMDownForJobStatusBeforeGetAMReportWithRetryTimes()
|
||||
throws IOException {
|
||||
throws IOException, YarnRemoteException {
|
||||
Configuration conf = new YarnConfiguration();
|
||||
conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 2);
|
||||
testRMDownForJobStatusBeforeGetAMReport(conf, conf.getInt(
|
||||
|
@ -335,7 +338,7 @@ public class TestClientServiceDelegate {
|
|||
|
||||
@Test
|
||||
public void testRMDownRestoreForJobStatusBeforeGetAMReport()
|
||||
throws IOException {
|
||||
throws IOException, YarnRemoteException {
|
||||
Configuration conf = new YarnConfiguration();
|
||||
conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 3);
|
||||
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
|
|||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
import org.junit.Test;
|
||||
import org.mockito.ArgumentCaptor;
|
||||
|
@ -47,9 +48,12 @@ public class TestResourceMgrDelegate {
|
|||
|
||||
/**
|
||||
* Tests that getRootQueues makes a request for the (recursive) child queues
|
||||
* @throws YarnRemoteException
|
||||
* @throws IOException
|
||||
*/
|
||||
@Test
|
||||
public void testGetRootQueues() throws IOException, InterruptedException {
|
||||
public void testGetRootQueues() throws IOException, InterruptedException,
|
||||
YarnRemoteException {
|
||||
final ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
|
||||
GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
|
||||
org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
|
||||
|
|
|
@ -40,7 +40,6 @@ import org.apache.hadoop.mapreduce.tools.CLI;
|
|||
import org.apache.hadoop.util.ExitUtil;
|
||||
import org.apache.hadoop.util.Tool;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
|
||||
/**
|
||||
test CLI class. CLI class implemented the Tool interface.
|
||||
|
@ -154,7 +153,7 @@ public class TestMRJobClient extends ClusterMapReduceTestCase {
|
|||
try {
|
||||
runTool(conf, jc, new String[] { "-fail-task", taid.toString() }, out);
|
||||
fail(" this task should field");
|
||||
} catch (YarnRemoteException e) {
|
||||
} catch (IOException e) {
|
||||
// task completed !
|
||||
assertTrue(e.getMessage().contains("_0001_m_000000_1"));
|
||||
}
|
||||
|
@ -174,7 +173,7 @@ public class TestMRJobClient extends ClusterMapReduceTestCase {
|
|||
try {
|
||||
runTool(conf, jc, new String[] { "-kill-task", taid.toString() }, out);
|
||||
fail(" this task should be killed");
|
||||
} catch (YarnRemoteException e) {
|
||||
} catch (IOException e) {
|
||||
// task completed
|
||||
assertTrue(e.getMessage().contains("_0001_m_000000_1"));
|
||||
}
|
||||
|
|
|
@ -62,7 +62,8 @@ public class TestJHSSecurity {
|
|||
private static final Log LOG = LogFactory.getLog(TestJHSSecurity.class);
|
||||
|
||||
@Test
|
||||
public void testDelegationToken() throws IOException, InterruptedException {
|
||||
public void testDelegationToken() throws IOException, InterruptedException,
|
||||
YarnRemoteException {
|
||||
|
||||
Logger rootLogger = LogManager.getRootLogger();
|
||||
rootLogger.setLevel(Level.DEBUG);
|
||||
|
|
|
@ -45,6 +45,7 @@ import org.apache.hadoop.net.NetUtils;
|
|||
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ContainerId;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.ipc.YarnRPC;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
|
||||
import org.apache.hadoop.yarn.util.BuilderUtils;
|
||||
|
@ -110,7 +111,7 @@ public class TestMRJobsWithHistoryService {
|
|||
|
||||
@Test
|
||||
public void testJobHistoryData() throws IOException, InterruptedException,
|
||||
AvroRemoteException, ClassNotFoundException {
|
||||
AvroRemoteException, ClassNotFoundException, YarnRemoteException {
|
||||
if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
|
||||
LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
|
||||
+ " not found. Not running test.");
|
||||
|
|
|
@@ -31,6 +31,9 @@ Release 2.0.5-beta - UNRELEASED
    favour of the copy present in the container token field.
    (Vinod Kumar Vavilapalli via sseth)

    YARN-629. Make YarnRemoteException not be rooted at IOException. (Xuan Gong
    via vinodkv)

  NEW FEATURES

    YARN-482. FS: Extend SchedulingMode to intermediate queues.
@@ -18,11 +18,10 @@

package org.apache.hadoop.yarn.exceptions;

import java.io.IOException;
import java.io.PrintStream;
import java.io.PrintWriter;

public abstract class YarnRemoteException extends IOException {
public abstract class YarnRemoteException extends Exception {
  private static final long serialVersionUID = 1L;

  public YarnRemoteException() {
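The hunk above is the heart of YARN-629: YarnRemoteException now extends Exception rather than IOException, so a plain catch (IOException ...) no longer traps it and it must be declared or handled explicitly. A tiny sketch of the consequence for callers (callRemote() is an invented method used only to show the checked-exception change):

import java.io.IOException;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

public class CheckedExceptionSketch {
  void callRemote() throws YarnRemoteException {
    // Stands in for any YARN RPC call after YARN-629.
  }

  void handle() throws IOException {
    try {
      callRemote();
    } catch (YarnRemoteException e) {
      // Before YARN-629 this landed in a catch (IOException) block; now it is
      // a separate checked exception and must be wrapped explicitly.
      throw new IOException(e);
    }
  }
}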
@ -310,8 +310,9 @@ public class Client extends YarnClientImpl {
|
|||
* Main run function for the client
|
||||
* @return true if application completed successfully
|
||||
* @throws IOException
|
||||
* @throws YarnRemoteException
|
||||
*/
|
||||
public boolean run() throws IOException {
|
||||
public boolean run() throws IOException, YarnRemoteException {
|
||||
|
||||
LOG.info("Running Client");
|
||||
start();
|
||||
|
|
|
@ -271,7 +271,7 @@ public class UnmanagedAMLauncher {
|
|||
amProc.destroy();
|
||||
}
|
||||
|
||||
public boolean run() throws IOException {
|
||||
public boolean run() throws IOException, YarnRemoteException {
|
||||
LOG.info("Starting Client");
|
||||
|
||||
// Connect to ResourceManager
|
||||
|
|
|
@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsRequest;
|
|||
import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.ipc.YarnRPC;
|
||||
|
@ -187,7 +188,7 @@ public class RMAdmin extends Configured implements Tool {
|
|||
return adminProtocol;
|
||||
}
|
||||
|
||||
private int refreshQueues() throws IOException {
|
||||
private int refreshQueues() throws IOException, YarnRemoteException {
|
||||
// Refresh the queue properties
|
||||
RMAdminProtocol adminProtocol = createAdminProtocol();
|
||||
RefreshQueuesRequest request =
|
||||
|
@ -196,7 +197,7 @@ public class RMAdmin extends Configured implements Tool {
|
|||
return 0;
|
||||
}
|
||||
|
||||
private int refreshNodes() throws IOException {
|
||||
private int refreshNodes() throws IOException, YarnRemoteException {
|
||||
// Refresh the nodes
|
||||
RMAdminProtocol adminProtocol = createAdminProtocol();
|
||||
RefreshNodesRequest request =
|
||||
|
@ -205,7 +206,8 @@ public class RMAdmin extends Configured implements Tool {
|
|||
return 0;
|
||||
}
|
||||
|
||||
private int refreshUserToGroupsMappings() throws IOException {
|
||||
private int refreshUserToGroupsMappings() throws IOException,
|
||||
YarnRemoteException {
|
||||
// Refresh the user-to-groups mappings
|
||||
RMAdminProtocol adminProtocol = createAdminProtocol();
|
||||
RefreshUserToGroupsMappingsRequest request =
|
||||
|
@ -214,7 +216,8 @@ public class RMAdmin extends Configured implements Tool {
|
|||
return 0;
|
||||
}
|
||||
|
||||
private int refreshSuperUserGroupsConfiguration() throws IOException {
|
||||
private int refreshSuperUserGroupsConfiguration() throws IOException,
|
||||
YarnRemoteException {
|
||||
// Refresh the super-user groups
|
||||
RMAdminProtocol adminProtocol = createAdminProtocol();
|
||||
RefreshSuperUserGroupsConfigurationRequest request =
|
||||
|
@ -223,7 +226,7 @@ public class RMAdmin extends Configured implements Tool {
|
|||
return 0;
|
||||
}
|
||||
|
||||
private int refreshAdminAcls() throws IOException {
|
||||
private int refreshAdminAcls() throws IOException, YarnRemoteException {
|
||||
// Refresh the admin acls
|
||||
RMAdminProtocol adminProtocol = createAdminProtocol();
|
||||
RefreshAdminAclsRequest request =
|
||||
|
@ -232,7 +235,7 @@ public class RMAdmin extends Configured implements Tool {
|
|||
return 0;
|
||||
}
|
||||
|
||||
private int refreshServiceAcls() throws IOException {
|
||||
private int refreshServiceAcls() throws IOException, YarnRemoteException {
|
||||
// Refresh the service acls
|
||||
RMAdminProtocol adminProtocol = createAdminProtocol();
|
||||
RefreshServiceAclsRequest request =
|
||||
|
|
|
@@ -19,6 +19,7 @@

package org.apache.hadoop.yarn.ipc;

import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.UndeclaredThrowableException;

import org.apache.hadoop.conf.Configuration;

@@ -71,12 +72,27 @@ public class RPCUtil {
      throws UndeclaredThrowableException {
    if (se.getCause() instanceof RemoteException) {
      try {
        RemoteException re = (RemoteException) se.getCause();
        Class<?> realClass = Class.forName(re.getClassName());
        //YarnRemoteException is not rooted as IOException.
        //Do the explicitly check if it is YarnRemoteException
        if (YarnRemoteException.class.isAssignableFrom(realClass)) {
          Constructor<? extends YarnRemoteException> cn =
              realClass.asSubclass(YarnRemoteException.class).getConstructor(
                  String.class);
          cn.setAccessible(true);
          YarnRemoteException ex = cn.newInstance(re.getMessage());
          ex.initCause(re);
          return ex;
        } else {
        throw ((RemoteException) se.getCause())
            .unwrapRemoteException(YarnRemoteExceptionPBImpl.class);
      } catch (YarnRemoteException ex) {
        return ex;
      }
      } catch (IOException e1) {
        throw new UndeclaredThrowableException(e1);
      } catch (Exception ex) {
        throw new UndeclaredThrowableException(
            (RemoteException) se.getCause());
      }
    } else if (se.getCause() instanceof YarnRemoteException) {
      return (YarnRemoteException) se.getCause();
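The RPCUtil hunk above rebuilds a concrete YarnRemoteException subclass from the class name carried inside an IPC RemoteException, because the generic unwrapRemoteException path only works for IOException-rooted types. A stripped-down sketch of that reflection step, generalized to any Exception subclass so it stays self-contained; the names and the broad throws clause here are illustrative, not the patch's API:

import java.lang.reflect.Constructor;

public class ReflectiveRebuildSketch {
  // Given an exception class name and message (as carried by an IPC
  // RemoteException), instantiate the concrete exception via its
  // (String) constructor and chain the original failure as the cause.
  static Exception rebuild(String className, String message, Throwable cause)
      throws Exception {
    Class<?> realClass = Class.forName(className);
    Class<? extends Exception> exClass = realClass.asSubclass(Exception.class);
    Constructor<? extends Exception> cn = exClass.getConstructor(String.class);
    cn.setAccessible(true);
    Exception ex = cn.newInstance(message);
    ex.initCause(cause);
    return ex;
  }
}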
@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
|
|||
import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
|
||||
import org.apache.hadoop.yarn.api.records.DelegationToken;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.ipc.YarnRPC;
|
||||
import org.apache.hadoop.yarn.util.BuilderUtils;
|
||||
import org.apache.hadoop.yarn.util.Records;
|
||||
|
@ -105,6 +106,8 @@ public class RMDelegationTokenIdentifier extends AbstractDelegationTokenIdentifi
|
|||
Records.newRecord(RenewDelegationTokenRequest.class);
|
||||
request.setDelegationToken(convertToProtoToken(token));
|
||||
return rmClient.renewDelegationToken(request).getNextExpirationTime();
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
} finally {
|
||||
RPC.stopProxy(rmClient);
|
||||
}
|
||||
|
@ -125,6 +128,8 @@ public class RMDelegationTokenIdentifier extends AbstractDelegationTokenIdentifi
|
|||
Records.newRecord(CancelDelegationTokenRequest.class);
|
||||
request.setDelegationToken(convertToProtoToken(token));
|
||||
rmClient.cancelDelegationToken(request);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
} finally {
|
||||
RPC.stopProxy(rmClient);
|
||||
}
|
||||
|
|
|
@ -40,6 +40,7 @@ import org.apache.hadoop.security.authorize.PolicyProvider;
|
|||
import org.apache.hadoop.security.token.Token;
|
||||
import org.apache.hadoop.security.token.TokenIdentifier;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import org.apache.hadoop.yarn.YarnException;
|
||||
import org.apache.hadoop.yarn.api.ContainerManager;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
|
||||
import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;
|
||||
|
|
|
@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path;
|
|||
import org.apache.hadoop.fs.UnsupportedFileSystemException;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
|
||||
import org.junit.After;
|
||||
|
||||
|
@ -73,7 +74,8 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void testContainerSetup() throws IOException, InterruptedException {
|
||||
public void testContainerSetup() throws IOException, InterruptedException,
|
||||
YarnRemoteException {
|
||||
// Don't run the test if the binary is not available.
|
||||
if (!shouldRunTest()) {
|
||||
LOG.info("LCE binary path is not passed. Not running the test");
|
||||
|
@ -96,7 +98,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
|
|||
|
||||
@Override
|
||||
public void testContainerLaunchAndStop() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
// Don't run the test if the binary is not available.
|
||||
if (!shouldRunTest()) {
|
||||
LOG.info("LCE binary path is not passed. Not running the test");
|
||||
|
@ -108,7 +110,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
|
|||
|
||||
@Override
|
||||
public void testContainerLaunchAndExitSuccess() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
// Don't run the test if the binary is not available.
|
||||
if (!shouldRunTest()) {
|
||||
LOG.info("LCE binary path is not passed. Not running the test");
|
||||
|
@ -120,7 +122,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
|
|||
|
||||
@Override
|
||||
public void testContainerLaunchAndExitFailure() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
// Don't run the test if the binary is not available.
|
||||
if (!shouldRunTest()) {
|
||||
LOG.info("LCE binary path is not passed. Not running the test");
|
||||
|
@ -132,7 +134,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
|
|||
|
||||
@Override
|
||||
public void testLocalFilesCleanup() throws InterruptedException,
|
||||
IOException {
|
||||
IOException, YarnRemoteException {
|
||||
// Don't run the test if the binary is not available.
|
||||
if (!shouldRunTest()) {
|
||||
LOG.info("LCE binary path is not passed. Not running the test");
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
|
|||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.AsyncDispatcher;
|
||||
import org.apache.hadoop.yarn.event.Dispatcher;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.server.api.ResourceTracker;
|
||||
|
@ -62,7 +63,7 @@ public class TestEventFlow {
|
|||
|
||||
@Test
|
||||
public void testSuccessfulContainerLaunch() throws InterruptedException,
|
||||
IOException {
|
||||
IOException, YarnRemoteException {
|
||||
|
||||
FileContext localFS = FileContext.getLocalFSFileContext();
|
||||
|
||||
|
|
|
@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
|
|||
import org.apache.hadoop.yarn.api.records.URL;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.Dispatcher;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
|
||||
|
@ -87,7 +88,8 @@ public class TestNodeManagerReboot {
|
|||
}
|
||||
|
||||
@Test(timeout = 20000)
|
||||
public void testClearLocalDirWhenNodeReboot() throws IOException {
|
||||
public void testClearLocalDirWhenNodeReboot() throws IOException,
|
||||
YarnRemoteException {
|
||||
nm = new MyNodeManager();
|
||||
nm.start();
|
||||
// create files under fileCache
|
||||
|
|
|
@ -84,7 +84,7 @@ public class TestNodeManagerResync {
|
|||
@SuppressWarnings("unchecked")
|
||||
@Test
|
||||
public void testKillContainersOnResync() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
NodeManager nm = new TestNodeManager1();
|
||||
YarnConfiguration conf = createNMConfig();
|
||||
nm.init(conf);
|
||||
|
@ -111,7 +111,7 @@ public class TestNodeManagerResync {
|
|||
@SuppressWarnings("unchecked")
|
||||
@Test
|
||||
public void testBlockNewContainerRequestsOnStartAndResync()
|
||||
throws IOException, InterruptedException {
|
||||
throws IOException, InterruptedException, YarnRemoteException {
|
||||
NodeManager nm = new TestNodeManager2();
|
||||
YarnConfiguration conf = createNMConfig();
|
||||
nm.init(conf);
|
||||
|
|
|
@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
|
|||
import org.apache.hadoop.yarn.api.records.URL;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.Dispatcher;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
|
||||
|
@ -93,7 +94,8 @@ public class TestNodeManagerShutdown {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testKillContainersOnShutdown() throws IOException {
|
||||
public void testKillContainersOnShutdown() throws IOException,
|
||||
YarnRemoteException {
|
||||
NodeManager nm = getNodeManager();
|
||||
nm.init(createNMConfig());
|
||||
nm.start();
|
||||
|
@ -132,7 +134,8 @@ public class TestNodeManagerShutdown {
|
|||
}
|
||||
|
||||
public static void startContainer(NodeManager nm, FileContext localFS,
|
||||
File scriptFileDir, File processStartFile) throws IOException {
|
||||
File scriptFileDir, File processStartFile) throws IOException,
|
||||
YarnRemoteException {
|
||||
ContainerManagerImpl containerManager = nm.getContainerManager();
|
||||
File scriptFile =
|
||||
createUnhaltingScriptFile(scriptFileDir, processStartFile);
|
||||
|
|
|
@ -111,7 +111,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testContainerSetup() throws IOException, InterruptedException {
|
||||
public void testContainerSetup() throws IOException, InterruptedException,
|
||||
YarnRemoteException {
|
||||
|
||||
containerManager.start();
|
||||
|
||||
|
@ -202,7 +203,7 @@ public class TestContainerManager extends BaseContainerManagerTest {
|
|||
|
||||
@Test
|
||||
public void testContainerLaunchAndStop() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
containerManager.start();
|
||||
|
||||
File scriptFile = new File(tmpDir, "scriptFile.sh");
|
||||
|
@ -306,7 +307,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
|
|||
pid, Signal.NULL));
|
||||
}
|
||||
|
||||
private void testContainerLaunchAndExit(int exitCode) throws IOException, InterruptedException {
|
||||
private void testContainerLaunchAndExit(int exitCode) throws IOException,
|
||||
InterruptedException, YarnRemoteException {
|
||||
|
||||
File scriptFile = new File(tmpDir, "scriptFile.sh");
|
||||
PrintWriter fileWriter = new PrintWriter(scriptFile);
|
||||
|
@ -381,7 +383,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testContainerLaunchAndExitSuccess() throws IOException, InterruptedException {
|
||||
public void testContainerLaunchAndExitSuccess() throws IOException,
|
||||
InterruptedException, YarnRemoteException {
|
||||
containerManager.start();
|
||||
int exitCode = 0;
|
||||
|
||||
|
@ -391,7 +394,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testContainerLaunchAndExitFailure() throws IOException, InterruptedException {
|
||||
public void testContainerLaunchAndExitFailure() throws IOException,
|
||||
InterruptedException, YarnRemoteException {
|
||||
containerManager.start();
|
||||
int exitCode = 50;
|
||||
|
||||
|
@ -402,7 +406,7 @@ public class TestContainerManager extends BaseContainerManagerTest {
|
|||
|
||||
@Test
|
||||
public void testLocalFilesCleanup() throws InterruptedException,
|
||||
IOException {
|
||||
IOException, YarnRemoteException {
|
||||
// Real del service
|
||||
delSrvc = new DeletionService(exec);
|
||||
delSrvc.init(conf);
|
||||
|
|
|
@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
|||
import org.apache.hadoop.yarn.event.DrainDispatcher;
|
||||
import org.apache.hadoop.yarn.event.Event;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
|
||||
|
@ -663,7 +664,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
|
|||
|
||||
@Test
|
||||
public void testLogAggregationForRealContainerLaunch() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
|
||||
this.containerManager.start();
|
||||
|
||||
|
|
|
@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
|
|||
import org.apache.hadoop.yarn.api.records.URL;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.AsyncDispatcher;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
|
||||
import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
|
||||
|
@ -177,7 +178,7 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
|
|||
|
||||
@Test
|
||||
public void testContainerKillOnMemoryOverflow() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
|
||||
if (!ProcfsBasedProcessTree.isAvailable()) {
|
||||
return;
|
||||
|
|
|
@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
|
|||
import org.apache.hadoop.yarn.api.records.NodeId;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.ipc.YarnRPC;
|
||||
|
@ -98,7 +99,7 @@ public class AMLauncher implements Runnable {
|
|||
containerMgrProxy = getContainerMgrProxy(masterContainerID);
|
||||
}
|
||||
|
||||
private void launch() throws IOException {
|
||||
private void launch() throws IOException, YarnRemoteException {
|
||||
connect();
|
||||
ContainerId masterContainerID = masterContainer.getId();
|
||||
ApplicationSubmissionContext applicationContext =
|
||||
|
@ -116,7 +117,7 @@ public class AMLauncher implements Runnable {
|
|||
+ " for AM " + application.getAppAttemptId());
|
||||
}
|
||||
|
||||
private void cleanup() throws IOException {
|
||||
private void cleanup() throws IOException, YarnRemoteException {
|
||||
connect();
|
||||
ContainerId containerId = masterContainer.getId();
|
||||
StopContainerRequest stopRequest =
|
||||
|
@ -256,6 +257,8 @@ public class AMLauncher implements Runnable {
|
|||
cleanup();
|
||||
} catch(IOException ie) {
|
||||
LOG.info("Error cleaning master ", ie);
|
||||
} catch (YarnRemoteException e) {
|
||||
LOG.info("Error cleaning master ", e);
|
||||
}
|
||||
break;
|
||||
default:
|
||||
|
|
|
@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
|
|||
import org.apache.hadoop.yarn.api.records.Priority;
|
||||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.Task.State;
|
||||
|
@ -127,7 +128,7 @@ public class Application {
|
|||
return used;
|
||||
}
|
||||
|
||||
public synchronized void submit() throws IOException {
|
||||
public synchronized void submit() throws IOException, YarnRemoteException {
|
||||
ApplicationSubmissionContext context = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
|
||||
context.setApplicationId(this.applicationId);
|
||||
context.getAMContainerSpec().setUser(this.user);
|
||||
|
@ -201,7 +202,8 @@ public class Application {
|
|||
addResourceRequest(priority, requests, ResourceRequest.ANY, capability);
|
||||
}
|
||||
|
||||
public synchronized void finishTask(Task task) throws IOException {
|
||||
public synchronized void finishTask(Task task) throws IOException,
|
||||
YarnRemoteException {
|
||||
Set<Task> tasks = this.tasks.get(task.getPriority());
|
||||
if (!tasks.remove(task)) {
|
||||
throw new IllegalStateException(
|
||||
|
@ -288,7 +290,7 @@ public class Application {
|
|||
}
|
||||
|
||||
public synchronized void assign(List<Container> containers)
|
||||
throws IOException {
|
||||
throws IOException, YarnRemoteException {
|
||||
|
||||
int numContainers = containers.size();
|
||||
// Schedule in priority order
|
||||
|
@ -307,12 +309,12 @@ public class Application {
|
|||
assignedContainers + "/" + numContainers);
|
||||
}
|
||||
|
||||
public synchronized void schedule() throws IOException {
|
||||
public synchronized void schedule() throws IOException, YarnRemoteException {
|
||||
assign(getResources());
|
||||
}
|
||||
|
||||
private synchronized void assign(Priority priority, NodeType type,
|
||||
List<Container> containers) throws IOException {
|
||||
List<Container> containers) throws IOException, YarnRemoteException {
|
||||
for (Iterator<Container> i=containers.iterator(); i.hasNext();) {
|
||||
Container container = i.next();
|
||||
String host = container.getNodeId().toString();
|
||||
|
|
|
@ -81,7 +81,7 @@ public class NodeManager implements ContainerManager {
|
|||
public NodeManager(String hostName, int containerManagerPort, int httpPort,
|
||||
String rackName, Resource capability,
|
||||
ResourceTrackerService resourceTrackerService, RMContext rmContext)
|
||||
throws IOException {
|
||||
throws IOException, YarnRemoteException {
|
||||
this.containerManagerAddress = hostName + ":" + containerManagerPort;
|
||||
this.nodeHttpAddress = hostName + ":" + httpPort;
|
||||
this.rackName = rackName;
|
||||
|
@ -144,7 +144,7 @@ public class NodeManager implements ContainerManager {
|
|||
}
|
||||
return containerStatuses;
|
||||
}
|
||||
public void heartbeat() throws IOException {
|
||||
public void heartbeat() throws IOException, YarnRemoteException {
|
||||
NodeStatus nodeStatus =
|
||||
org.apache.hadoop.yarn.server.resourcemanager.NodeManager.createNodeStatus(
|
||||
nodeId, getContainerStatuses(containers));
|
||||
|
|
|
@ -205,15 +205,19 @@ public class TestClientRMService {
|
|||
owner.doAs(new PrivilegedExceptionAction<Void>() {
|
||||
@Override
|
||||
public Void run() throws Exception {
|
||||
try {
|
||||
checkTokenRenewal(owner, other);
|
||||
return null;
|
||||
}
|
||||
});
|
||||
} catch (YarnRemoteException e) {
|
||||
Assert.assertEquals(e.getMessage(),
|
||||
} catch (YarnRemoteException ex) {
|
||||
Assert.assertEquals(ex.getMessage(),
|
||||
"Client " + owner.getUserName() +
|
||||
" tries to renew a token with renewer specified as " +
|
||||
other.getUserName());
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (Exception e) {
|
||||
return;
|
||||
}
|
||||
Assert.fail("renew should have failed");
|
||||
|
@ -232,7 +236,7 @@ public class TestClientRMService {
|
|||
}
|
||||
|
||||
private void checkTokenRenewal(UserGroupInformation owner,
|
||||
UserGroupInformation renewer) throws IOException {
|
||||
UserGroupInformation renewer) throws IOException, YarnRemoteException {
|
||||
RMDelegationTokenIdentifier tokenIdentifier =
|
||||
new RMDelegationTokenIdentifier(
|
||||
new Text(owner.getUserName()), new Text(renewer.getUserName()), null);
|
||||
|
@ -312,7 +316,8 @@ public class TestClientRMService {
|
|||
|
||||
@Test(timeout=4000)
|
||||
public void testConcurrentAppSubmit()
|
||||
throws IOException, InterruptedException, BrokenBarrierException {
|
||||
throws IOException, InterruptedException, BrokenBarrierException,
|
||||
YarnRemoteException {
|
||||
YarnScheduler yarnScheduler = mockYarnScheduler();
|
||||
RMContext rmContext = mock(RMContext.class);
|
||||
mockRMContext(yarnScheduler, rmContext);
|
||||
|
|
|
@ -72,7 +72,8 @@ public class TestClientRMTokens {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testDelegationToken() throws IOException, InterruptedException {
|
||||
public void testDelegationToken() throws IOException, InterruptedException,
|
||||
YarnRemoteException {
|
||||
|
||||
final YarnConfiguration conf = new YarnConfiguration();
|
||||
conf.set(YarnConfiguration.RM_PRINCIPAL, "testuser/localhost@apache.org");
|
||||
|
|
|
@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
|
|||
import org.apache.hadoop.yarn.api.records.Resource;
|
||||
import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
|
||||
import org.junit.After;
|
||||
|
@ -59,7 +60,8 @@ public class TestResourceManager {
|
|||
|
||||
private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
|
||||
registerNode(String hostName, int containerManagerPort, int httpPort,
|
||||
String rackName, Resource capability) throws IOException {
|
||||
String rackName, Resource capability) throws IOException,
|
||||
YarnRemoteException {
|
||||
return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
|
||||
hostName, containerManagerPort, httpPort, rackName, capability,
|
||||
resourceManager.getResourceTrackerService(), resourceManager
|
||||
|
@ -67,7 +69,8 @@ public class TestResourceManager {
|
|||
}
|
||||
|
||||
// @Test
|
||||
public void testResourceAllocation() throws IOException {
|
||||
public void testResourceAllocation() throws IOException,
|
||||
YarnRemoteException {
|
||||
LOG.info("--- START: testResourceAllocation ---");
|
||||
|
||||
final int memory = 4 * 1024;
|
||||
|
|
|
@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
|
|||
import org.apache.hadoop.yarn.event.Event;
|
||||
import org.apache.hadoop.yarn.event.EventHandler;
|
||||
import org.apache.hadoop.yarn.event.InlineDispatcher;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
|
||||
|
@ -88,7 +89,7 @@ public class TestRMNMRPCResponseId {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testRPCResponseId() throws IOException {
|
||||
public void testRPCResponseId() throws IOException, YarnRemoteException {
|
||||
String node = "localhost";
|
||||
Resource capability = BuilderUtils.newResource(1024, 1);
|
||||
RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);
|
||||
|
|
|
@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
|
|||
import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.AsyncDispatcher;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.Application;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
|
||||
|
@ -101,7 +102,7 @@ public class TestCapacityScheduler {
|
|||
private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
|
||||
registerNode(String hostName, int containerManagerPort, int httpPort,
|
||||
String rackName, Resource capability)
|
||||
throws IOException {
|
||||
throws IOException, YarnRemoteException {
|
||||
return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
|
||||
hostName, containerManagerPort, httpPort, rackName, capability,
|
||||
resourceManager.getResourceTrackerService(), resourceManager
|
||||
|
|
|
@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
|
|||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.event.AsyncDispatcher;
|
||||
import org.apache.hadoop.yarn.event.InlineDispatcher;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.factories.RecordFactory;
|
||||
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
|
||||
import org.apache.hadoop.yarn.server.resourcemanager.Application;
|
||||
|
@ -84,7 +85,8 @@ public class TestFifoScheduler {
|
|||
|
||||
private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
|
||||
registerNode(String hostName, int containerManagerPort, int nmHttpPort,
|
||||
String rackName, Resource capability) throws IOException {
|
||||
String rackName, Resource capability) throws IOException,
|
||||
YarnRemoteException {
|
||||
return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
|
||||
hostName, containerManagerPort, nmHttpPort, rackName, capability,
|
||||
resourceManager.getResourceTrackerService(), resourceManager
|
||||
|
|
|
@ -78,7 +78,7 @@ public class TestClientTokens {
|
|||
private interface CustomProtocol {
|
||||
public static final long versionID = 1L;
|
||||
|
||||
public void ping();
|
||||
public void ping() throws YarnRemoteException;
|
||||
}
|
||||
|
||||
private static class CustomSecurityInfo extends SecurityInfo {
|
||||
|
@ -121,7 +121,7 @@ public class TestClientTokens {
|
|||
}
|
||||
|
||||
@Override
|
||||
public void ping() {
|
||||
public void ping() throws YarnRemoteException {
|
||||
this.pinged = true;
|
||||
}
|
||||
|
||||
|
@ -273,6 +273,7 @@ public class TestClientTokens {
|
|||
ugi.doAs(new PrivilegedExceptionAction<Void>() {
|
||||
@Override
|
||||
public Void run() throws Exception {
|
||||
try {
|
||||
CustomProtocol client =
|
||||
(CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
|
||||
am.address, conf);
|
||||
|
@ -280,11 +281,13 @@ public class TestClientTokens {
|
|||
fail("Connection initiation with illegally modified "
|
||||
+ "tokens is expected to fail.");
|
||||
return null;
|
||||
}
|
||||
});
|
||||
} catch (YarnRemoteException e) {
|
||||
} catch (YarnRemoteException ex) {
|
||||
fail("Cannot get a YARN remote exception as "
|
||||
+ "it will indicate RPC success");
|
||||
throw ex;
|
||||
}
|
||||
}
|
||||
});
|
||||
} catch (Exception e) {
|
||||
Assert
|
||||
.assertEquals(java.lang.reflect.UndeclaredThrowableException.class
|
||||
|
|
|
@@ -311,7 +311,7 @@ public class MiniYARNCluster extends CompositeService {
            NodeHeartbeatResponse.class);
        try {
          response = rt.nodeHeartbeat(request);
        } catch (IOException ioe) {
        } catch (YarnRemoteException ioe) {
          LOG.info("Exception in heartbeat from node " +
              request.getNodeStatus().getNodeId(), ioe);
          throw RPCUtil.getRemoteException(ioe);

@@ -327,7 +327,7 @@
            newRecordInstance(RegisterNodeManagerResponse.class);
        try {
          response = rt.registerNodeManager(request);
        } catch (IOException ioe) {
        } catch (YarnRemoteException ioe) {
          LOG.info("Exception in node registration from "
              + request.getNodeId().toString(), ioe);
          throw RPCUtil.getRemoteException(ioe);
@ -127,7 +127,7 @@ public class TestContainerManagerSecurity {
|
|||
|
||||
@Test
|
||||
public void testAuthenticatedUser() throws IOException,
|
||||
InterruptedException {
|
||||
InterruptedException, YarnRemoteException {
|
||||
|
||||
LOG.info("Running test for authenticated user");
|
||||
|
||||
|
@ -179,7 +179,8 @@ public class TestContainerManagerSecurity {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testMaliceUser() throws IOException, InterruptedException {
|
||||
public void testMaliceUser() throws IOException, InterruptedException,
|
||||
YarnRemoteException {
|
||||
|
||||
LOG.info("Running test for malice user");
|
||||
|
||||
|
@ -265,7 +266,8 @@ public class TestContainerManagerSecurity {
|
|||
}
|
||||
|
||||
@Test
|
||||
public void testUnauthorizedUser() throws IOException, InterruptedException {
|
||||
public void testUnauthorizedUser() throws IOException, InterruptedException,
|
||||
YarnRemoteException {
|
||||
|
||||
LOG.info("\n\nRunning test for malice user");
|
||||
|
||||
|
|
|
@ -50,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
|
|||
import org.apache.hadoop.yarn.api.records.ApplicationId;
|
||||
import org.apache.hadoop.yarn.api.records.ApplicationReport;
|
||||
import org.apache.hadoop.yarn.conf.YarnConfiguration;
|
||||
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
|
||||
import org.apache.hadoop.yarn.util.Apps;
|
||||
import org.apache.hadoop.yarn.util.StringHelper;
|
||||
import org.apache.hadoop.yarn.util.TrackingUriPlugin;
|
||||
|
@ -215,7 +216,8 @@ public class WebAppProxyServlet extends HttpServlet {
|
|||
return false;
|
||||
}
|
||||
|
||||
private ApplicationReport getApplicationReport(ApplicationId id) throws IOException {
|
||||
private ApplicationReport getApplicationReport(ApplicationId id)
|
||||
throws IOException, YarnRemoteException {
|
||||
return ((AppReportFetcher) getServletContext()
|
||||
.getAttribute(WebAppProxy.FETCHER_ATTRIBUTE)).getApplicationReport(id);
|
||||
}
|
||||
|
@ -333,6 +335,8 @@ public class WebAppProxyServlet extends HttpServlet {
|
|||
|
||||
} catch(URISyntaxException e) {
|
||||
throw new IOException(e);
|
||||
} catch (YarnRemoteException e) {
|
||||
throw new IOException(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|