From 69fa81679f59378fd19a2c65db8019393d7c05a2 Mon Sep 17 00:00:00 2001 From: Jason Lowe Date: Mon, 23 Jan 2017 17:12:51 +0000 Subject: [PATCH] YARN-5910. Support for multi-cluster delegation tokens. Contributed by Jian He --- .../apache/hadoop/mapreduce/MRJobConfig.java | 2 + .../src/main/resources/mapred-default.xml | 18 +++ .../org/apache/hadoop/mapred/YARNRunner.java | 36 +++++ .../apache/hadoop/mapred/TestYARNRunner.java | 39 ++++++ .../api/records/ContainerLaunchContext.java | 16 +++ .../hadoop/yarn/conf/YarnConfiguration.java | 7 +- .../src/main/proto/yarn_protos.proto | 2 + .../impl/pb/ContainerLaunchContextPBImpl.java | 26 ++++ .../src/main/resources/yarn-default.xml | 10 ++ .../yarn/server/utils/BuilderUtils.java | 32 +++++ .../resourcemanager/ClientRMService.java | 19 ++- .../server/resourcemanager/RMAppManager.java | 30 +---- .../resourcemanager/rmapp/RMAppImpl.java | 23 ++-- .../security/DelegationTokenRenewer.java | 74 ++++++---- .../yarn/server/resourcemanager/MockRM.java | 36 +++-- .../resourcemanager/TestAppManager.java | 41 ++---- .../server/resourcemanager/TestRMRestart.java | 37 +++-- .../TestRMAppLogAggregationStatus.java | 8 +- .../rmapp/TestRMAppTransitions.java | 2 +- .../scheduler/fair/FairSchedulerTestBase.java | 9 +- .../security/TestDelegationTokenRenewer.java | 127 ++++++++++++++++-- 21 files changed, 448 insertions(+), 146 deletions(-) diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index 7add7df43a5..ab482553838 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -1013,4 +1013,6 @@ public interface MRJobConfig { * A comma-separated list of properties whose value will be redacted. */ String MR_JOB_REDACTED_PROPERTIES = "mapreduce.job.redacted-properties"; + + String MR_JOB_SEND_TOKEN_CONF = "mapreduce.job.send-token-conf"; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index 1b4f373c9e3..a163fbd81ba 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -1989,4 +1989,22 @@ mapreduce.job.redacted-properties + + + + This configuration is a regex expression. The list of configurations that + match the regex expression will be sent to RM. RM will use these + configurations for renewing tokens. + This configuration is added for below scenario: User needs to run distcp + jobs across two clusters, but the RM does not have necessary hdfs + configurations to connect to the remote hdfs cluster. Hence, user relies on + this config to send the configurations to RM and RM uses these + configurations to renew tokens. 
+ For example the following regex expression indicates the minimum required + configs for RM to connect to a remote hdfs cluster: + dfs.nameservices|^dfs.namenode.rpc-address.*$|^dfs.ha.namenodes.*$|^dfs.client.failover.proxy.provider.*$|dfs.namenode.kerberos.principal + + mapreduce.job.send-token-conf + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index 4c6f0f3139d..98fe5535cf3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -24,6 +24,7 @@ import java.util.Collection; import java.util.HashMap; import java.util.HashSet; +import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.Vector; @@ -499,6 +500,12 @@ public ApplicationSubmissionContext createApplicationSubmissionContext( ContainerLaunchContext.newInstance(localResources, environment, vargsFinal, null, securityTokens, acls); + String regex = conf.get(MRJobConfig.MR_JOB_SEND_TOKEN_CONF); + if (regex != null && !regex.isEmpty()) { + setTokenRenewerConf(amContainer, conf, regex); + } + + Collection tagsFromConf = jobConf.getTrimmedStringCollection(MRJobConfig.JOB_TAGS); @@ -576,6 +583,35 @@ public ApplicationSubmissionContext createApplicationSubmissionContext( return appContext; } + private void setTokenRenewerConf(ContainerLaunchContext context, + Configuration conf, String regex) throws IOException { + DataOutputBuffer dob = new DataOutputBuffer(); + Configuration copy = new Configuration(false); + copy.clear(); + int count = 0; + for (Map.Entry map : conf) { + String key = map.getKey(); + String val = map.getValue(); + if (key.matches(regex)) { + copy.set(key, val); + count++; + } + } + copy.write(dob); + ByteBuffer appConf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + LOG.info("Send configurations that match regex expression: " + regex + + " , total number of configs: " + count + ", total size : " + dob + .getLength() + " bytes."); + if (LOG.isDebugEnabled()) { + for (Iterator> itor = copy.iterator(); itor + .hasNext(); ) { + Map.Entry entry = itor.next(); + LOG.info(entry.getKey() + " ===> " + entry.getValue()); + } + } + context.setTokensConf(appConf); + } + @Override public void setJobPriority(JobID arg0, String arg1) throws IOException, InterruptedException { diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 0e17ac84735..279c8cec853 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -47,6 +47,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeys; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; 
import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; @@ -99,6 +100,7 @@ import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.security.client.RMDelegationTokenIdentifier; +import org.apache.hadoop.yarn.server.utils.BuilderUtils; import org.apache.hadoop.yarn.util.Records; import org.apache.log4j.Appender; import org.apache.log4j.Layout; @@ -106,6 +108,7 @@ import org.apache.log4j.SimpleLayout; import org.apache.log4j.WriterAppender; import org.junit.After; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; @@ -675,4 +678,40 @@ private ApplicationSubmissionContext buildSubmitContext( return yarnRunner.createApplicationSubmissionContext(jobConf, testWorkDir.toString(), new Credentials()); } + + // Test configs that match regex expression should be set in + // containerLaunchContext + @Test + public void testSendJobConf() throws IOException { + JobConf jobConf = new JobConf(); + jobConf.set("dfs.nameservices", "mycluster1,mycluster2"); + jobConf.set("dfs.namenode.rpc-address.mycluster2.nn1", "123.0.0.1"); + jobConf.set("dfs.namenode.rpc-address.mycluster2.nn2", "123.0.0.2"); + jobConf.set("dfs.ha.namenodes.mycluster2", "nn1,nn2"); + jobConf.set("dfs.client.failover.proxy.provider.mycluster2", "provider"); + jobConf.set("hadoop.tmp.dir", "testconfdir"); + jobConf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + jobConf.set("mapreduce.job.send-token-conf", + "dfs.nameservices|^dfs.namenode.rpc-address.*$|^dfs.ha.namenodes.*$" + + "|^dfs.client.failover.proxy.provider.*$" + + "|dfs.namenode.kerberos.principal"); + UserGroupInformation.setConfiguration(jobConf); + + YARNRunner yarnRunner = new YARNRunner(jobConf); + ApplicationSubmissionContext submissionContext = + buildSubmitContext(yarnRunner, jobConf); + Configuration confSent = BuilderUtils.parseTokensConf(submissionContext); + + // configs that match regex should be included + Assert.assertTrue(confSent.get("dfs.namenode.rpc-address.mycluster2.nn1") + .equals("123.0.0.1")); + Assert.assertTrue(confSent.get("dfs.namenode.rpc-address.mycluster2.nn2") + .equals("123.0.0.2")); + + // configs that aren't matching regex should not be included + Assert.assertTrue(confSent.get("hadoop.tmp.dir") == null || !confSent + .get("hadoop.tmp.dir").equals("testconfdir")); + UserGroupInformation.reset(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java index 6d4bccd80c8..616aa4ba4e1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerLaunchContext.java @@ -107,6 +107,22 @@ public static ContainerLaunchContext newInstance( @Stable public abstract void setTokens(ByteBuffer tokens); + /** + * Get the configuration used by RM to renew tokens. + * @return The configuration used by RM to renew the tokens. + */ + @Public + @Unstable + public abstract ByteBuffer getTokensConf(); + + /** + * Set the configuration used by RM to renew the tokens. 
+ * @param tokensConf The configuration used by RM to renew the tokens + */ + @Public + @Unstable + public abstract void setTokensConf(ByteBuffer tokensConf); + /** * Get LocalResource required by the container. * @return all LocalResource required by the container diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 7dd5ce33245..7887fbce250 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -526,7 +526,12 @@ public static boolean isAclEnabled(Configuration conf) { RM_PREFIX + "delegation.token.max-lifetime"; public static final long RM_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT = 7*24*60*60*1000; // 7 days - + + public static final String RM_DELEGATION_TOKEN_MAX_CONF_SIZE = + RM_PREFIX + "delegation-token.max-conf-size-bytes"; + public static final int DEFAULT_RM_DELEGATION_TOKEN_MAX_CONF_SIZE_BYTES = + 12800; + public static final String RECOVERY_ENABLED = RM_PREFIX + "recovery.enabled"; public static final boolean DEFAULT_RM_RECOVERY_ENABLED = false; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index a8ba740949a..a6dbf3c2906 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -548,6 +548,8 @@ message ContainerLaunchContextProto { repeated string command = 5; repeated ApplicationACLMapProto application_ACLs = 6; optional ContainerRetryContextProto container_retry_context = 7; + optional bytes tokens_conf = 8; + } message ContainerStatusProto { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java index 1efe5417103..1f76c34960b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerLaunchContextPBImpl.java @@ -54,6 +54,7 @@ public class ContainerLaunchContextPBImpl private Map localResources = null; private ByteBuffer tokens = null; + private ByteBuffer tokensConf = null; private Map serviceData = null; private Map environment = null; private List commands = null; @@ -111,6 +112,9 @@ private void mergeLocalToBuilder() { if (this.tokens != null) { builder.setTokens(convertToProtoFormat(this.tokens)); } + if (this.tokensConf != null) { + builder.setTokensConf(convertToProtoFormat(this.tokensConf)); + } if (this.serviceData != null) { addServiceDataToProto(); } @@ -267,6 +271,28 @@ public void setTokens(ByteBuffer tokens) { this.tokens = tokens; } + @Override + public ByteBuffer getTokensConf() { + ContainerLaunchContextProtoOrBuilder p = viaProto ? 
proto : builder; + if (this.tokensConf != null) { + return this.tokensConf; + } + if (!p.hasTokensConf()) { + return null; + } + this.tokensConf = convertFromProtoFormat(p.getTokensConf()); + return this.tokensConf; + } + + @Override + public void setTokensConf(ByteBuffer tokensConf) { + maybeInitBuilder(); + if (tokensConf == null) { + builder.clearTokensConf(); + } + this.tokensConf = tokensConf; + } + @Override public Map getServiceData() { initServiceData(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index e1bdcc0327c..1e929a8c500 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -703,6 +703,16 @@ 30000 + + Maximum size in bytes for configurations that can be provided + by application to RM for delegation token renewal. + By experiment, it's roughly 128 bytes per key-value pair. + The default value 12800 allows roughly 100 configs, may be less. + + yarn.resourcemanager.delegation-token.max-conf-size-bytes + 12800 + + If true, ResourceManager will have proxy-user privileges. Use case: In a secure cluster, YARN requires the user hdfs delegation-tokens to diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java index 74c06ff9781..e7f47af2647 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/utils/BuilderUtils.java @@ -29,8 +29,11 @@ import java.util.Set; import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.net.NetUtils; +import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.records.AMCommand; @@ -62,6 +65,8 @@ import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.YarnApplicationState; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.exceptions.YarnException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.nodelabels.CommonNodeLabelsManager; @@ -496,4 +501,31 @@ public static AllocateResponse newAllocateResponse(int responseId, return response; } + + public static Credentials parseCredentials( + ApplicationSubmissionContext application) throws IOException { + Credentials credentials = new Credentials(); + DataInputByteBuffer dibb = new DataInputByteBuffer(); + ByteBuffer tokens = application.getAMContainerSpec().getTokens(); + if (tokens != null) { + dibb.reset(tokens); + credentials.readTokenStorageStream(dibb); + tokens.rewind(); + } + return credentials; + } + + public static Configuration 
parseTokensConf( + ApplicationSubmissionContext context) throws IOException { + ByteBuffer tokensConf = context.getAMContainerSpec().getTokensConf(); + if (tokensConf == null) { + return null; + } + DataInputByteBuffer dibb = new DataInputByteBuffer(); + dibb.reset(tokensConf); + Configuration appConf = new Configuration(false); + appConf.readFields(dibb); + tokensConf.rewind(); + return appConf; + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java index c375887476e..c4e3bf7453d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/ClientRMService.java @@ -22,6 +22,7 @@ import java.io.InputStream; import java.net.InetAddress; import java.net.InetSocketAddress; +import java.nio.ByteBuffer; import java.security.AccessControlException; import java.text.MessageFormat; import java.util.ArrayList; @@ -614,6 +615,21 @@ public SubmitApplicationResponse submitApplication( return SubmitApplicationResponse.newInstance(); } + ByteBuffer tokenConf = + submissionContext.getAMContainerSpec().getTokensConf(); + if (tokenConf != null) { + int maxSize = getConfig() + .getInt(YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE, + YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_MAX_CONF_SIZE_BYTES); + LOG.info("Using app provided configurations for delegation token renewal," + + " total size = " + tokenConf.capacity()); + if (tokenConf.capacity() > maxSize) { + throw new YarnException( + "Exceed " + YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE + + " = " + maxSize + " bytes, current conf size = " + + tokenConf.capacity() + " bytes."); + } + } if (submissionContext.getQueue() == null) { submissionContext.setQueue(YarnConfiguration.DEFAULT_QUEUE_NAME); } @@ -648,8 +664,7 @@ public SubmitApplicationResponse submitApplication( RMAuditLogger.logSuccess(user, AuditConstants.SUBMIT_APP_REQUEST, "ClientRMService", applicationId, callerContext); } catch (YarnException e) { - LOG.info("Exception in submitting application with id " + - applicationId.getId(), e); + LOG.info("Exception in submitting " + applicationId, e); RMAuditLogger.logFailure(user, AuditConstants.SUBMIT_APP_REQUEST, e.getMessage(), "ClientRMService", "Exception in submitting application", applicationId, callerContext); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 8c6ba093178..cc796e3f646 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -17,18 +17,14 @@ */ package 
org.apache.hadoop.yarn.server.resourcemanager; -import java.io.IOException; -import java.nio.ByteBuffer; import java.util.LinkedList; import java.util.Map; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.ipc.Server; import org.apache.hadoop.security.AccessControlException; -import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; @@ -299,14 +295,14 @@ protected void submitApplication( // constructor. RMAppImpl application = createAndPopulateNewRMApp( submissionContext, submitTime, user, false, -1); - Credentials credentials = null; try { - credentials = parseCredentials(submissionContext); if (UserGroupInformation.isSecurityEnabled()) { this.rmContext.getDelegationTokenRenewer() - .addApplicationAsync(applicationId, credentials, + .addApplicationAsync(applicationId, + BuilderUtils.parseCredentials(submissionContext), submissionContext.getCancelTokensWhenComplete(), - application.getUser()); + application.getUser(), + BuilderUtils.parseTokensConf(submissionContext)); } else { // Dispatcher is not yet started at this time, so these START events // enqueued should be guaranteed to be first processed when dispatcher @@ -315,11 +311,10 @@ protected void submitApplication( .handle(new RMAppEvent(applicationId, RMAppEventType.START)); } } catch (Exception e) { - LOG.warn("Unable to parse credentials.", e); + LOG.warn("Unable to parse credentials for " + applicationId, e); // Sending APP_REJECTED is fine, since we assume that the // RMApp is in NEW state and thus we haven't yet informed the // scheduler about the existence of the application - assert application.getState() == RMAppState.NEW; this.rmContext.getDispatcher().getEventHandler() .handle(new RMAppEvent(applicationId, RMAppEventType.APP_REJECTED, e.getMessage())); @@ -515,20 +510,7 @@ private ResourceRequest validateAndCreateResourceRequest( return null; } - - protected Credentials parseCredentials( - ApplicationSubmissionContext application) throws IOException { - Credentials credentials = new Credentials(); - DataInputByteBuffer dibb = new DataInputByteBuffer(); - ByteBuffer tokens = application.getAMContainerSpec().getTokens(); - if (tokens != null) { - dibb.reset(tokens); - credentials.readTokenStorageStream(dibb); - tokens.rewind(); - } - return credentials; - } - + @Override public void recover(RMState state) throws Exception { RMStateStore store = rmContext.getStateStore(); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 4db595e25cd..12ece3f6f36 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -1120,9 +1120,12 @@ public RMAppState transition(RMAppImpl app, RMAppEvent event) { try { app.rmContext.getDelegationTokenRenewer() 
.addApplicationAsyncDuringRecovery(app.getApplicationId(), - app.parseCredentials(), + BuilderUtils.parseCredentials(app.submissionContext), app.submissionContext.getCancelTokensWhenComplete(), - app.getUser()); + app.getUser(), + BuilderUtils.parseTokensConf(app.submissionContext)); + // set the memory free + app.submissionContext.getAMContainerSpec().setTokensConf(null); } catch (Exception e) { String msg = "Failed to fetch user credentials from application:" + e.getMessage(); @@ -1175,6 +1178,8 @@ public void transition(RMAppImpl app, RMAppEvent event) { app.submissionContext, false, app.applicationPriority)); // send the ATS create Event app.sendATSCreateEvent(); + // Set the memory free after submission context is persisted + app.submissionContext.getAMContainerSpec().setTokensConf(null); } } @@ -1490,6 +1495,8 @@ public void transition(RMAppImpl app, RMAppEvent event) { .applicationFinished(app, finalState); app.rmContext.getSystemMetricsPublisher() .appFinished(app, finalState, app.finishTime); + // set the memory free + app.submissionContext.getAMContainerSpec().setTokensConf(null); }; } @@ -1699,18 +1706,6 @@ public ResourceRequest getAMResourceRequest() { return this.amReq; } - protected Credentials parseCredentials() throws IOException { - Credentials credentials = new Credentials(); - DataInputByteBuffer dibb = new DataInputByteBuffer(); - ByteBuffer tokens = submissionContext.getAMContainerSpec().getTokens(); - if (tokens != null) { - dibb.reset(tokens); - credentials.readTokenStorageStream(dibb); - tokens.rewind(); - } - return credentials; - } - @Override public Map getLogAggregationReportsForApp() { try { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java index dfbf33397d1..abb8d59ff0b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/DelegationTokenRenewer.java @@ -29,6 +29,7 @@ import java.util.HashSet; import java.util.Iterator; import java.util.List; +import java.util.Map; import java.util.Map.Entry; import java.util.Set; import java.util.Timer; @@ -379,43 +380,43 @@ public Set> getDelegationTokens() { * @param applicationId added application * @param ts tokens * @param shouldCancelAtEnd true if tokens should be canceled when the app is - * done else false. + * done else false. * @param user user + * @param tokenConf tokenConf sent by the app-submitter */ public void addApplicationAsync(ApplicationId applicationId, Credentials ts, - boolean shouldCancelAtEnd, String user) { + boolean shouldCancelAtEnd, String user, Configuration tokenConf) { processDelegationTokenRenewerEvent(new DelegationTokenRenewerAppSubmitEvent( - applicationId, ts, shouldCancelAtEnd, user)); + applicationId, ts, shouldCancelAtEnd, user, tokenConf)); } /** * Asynchronously add application tokens for renewal. 
- * - * @param applicationId + * @param applicationId * added application * @param ts * tokens * @param shouldCancelAtEnd * true if tokens should be canceled when the app is done else false. - * @param user - * user + * @param user user + * @param tokenConf tokenConf sent by the app-submitter */ public void addApplicationAsyncDuringRecovery(ApplicationId applicationId, - Credentials ts, boolean shouldCancelAtEnd, String user) { + Credentials ts, boolean shouldCancelAtEnd, String user, + Configuration tokenConf) { processDelegationTokenRenewerEvent( new DelegationTokenRenewerAppRecoverEvent(applicationId, ts, - shouldCancelAtEnd, user)); + shouldCancelAtEnd, user, tokenConf)); } - /** - * Synchronously renew delegation tokens. - * @param user user - */ + + // Only for testing + // Synchronously renew delegation tokens. public void addApplicationSync(ApplicationId applicationId, Credentials ts, boolean shouldCancelAtEnd, String user) throws IOException, InterruptedException { handleAppSubmitEvent(new DelegationTokenRenewerAppSubmitEvent( - applicationId, ts, shouldCancelAtEnd, user)); + applicationId, ts, shouldCancelAtEnd, user, new Configuration())); } private void handleAppSubmitEvent(AbstractDelegationTokenRenewerAppEvent evt) @@ -455,8 +456,27 @@ private void handleAppSubmitEvent(AbstractDelegationTokenRenewerAppEvent evt) DelegationTokenToRenew dttr = allTokens.get(token); if (dttr == null) { + Configuration tokenConf; + if (evt.tokenConf != null) { + // Override conf with app provided conf - this is required in cases + // where RM does not have the required conf to communicate with + // remote hdfs cluster. The conf is provided by the application + // itself. + tokenConf = evt.tokenConf; + LOG.info("Using app provided token conf for renewal," + + " number of configs = " + tokenConf.size()); + if (LOG.isDebugEnabled()) { + for (Iterator> itor = + tokenConf.iterator(); itor.hasNext(); ) { + Map.Entry entry = itor.next(); + LOG.info(entry.getKey() + " ===> " + entry.getValue()); + } + } + } else { + tokenConf = getConfig(); + } dttr = new DelegationTokenToRenew(Arrays.asList(applicationId), token, - getConfig(), now, shouldCancelAtEnd, evt.getUser()); + tokenConf, now, shouldCancelAtEnd, evt.getUser()); try { renewToken(dttr); } catch (IOException ioe) { @@ -926,22 +946,22 @@ private void handleDTRenewerAppRecoverEvent( } static class DelegationTokenRenewerAppSubmitEvent - extends - AbstractDelegationTokenRenewerAppEvent { + extends AbstractDelegationTokenRenewerAppEvent { public DelegationTokenRenewerAppSubmitEvent(ApplicationId appId, - Credentials credentails, boolean shouldCancelAtEnd, String user) { + Credentials credentails, boolean shouldCancelAtEnd, String user, + Configuration tokenConf) { super(appId, credentails, shouldCancelAtEnd, user, - DelegationTokenRenewerEventType.VERIFY_AND_START_APPLICATION); + DelegationTokenRenewerEventType.VERIFY_AND_START_APPLICATION, tokenConf); } } static class DelegationTokenRenewerAppRecoverEvent - extends - AbstractDelegationTokenRenewerAppEvent { + extends AbstractDelegationTokenRenewerAppEvent { public DelegationTokenRenewerAppRecoverEvent(ApplicationId appId, - Credentials credentails, boolean shouldCancelAtEnd, String user) { + Credentials credentails, boolean shouldCancelAtEnd, String user, + Configuration tokenConf) { super(appId, credentails, shouldCancelAtEnd, user, - DelegationTokenRenewerEventType.RECOVER_APPLICATION); + DelegationTokenRenewerEventType.RECOVER_APPLICATION, tokenConf); } } @@ -949,16 +969,18 @@ static class 
AbstractDelegationTokenRenewerAppEvent extends DelegationTokenRenewerEvent { private Credentials credentials; + private Configuration tokenConf; private boolean shouldCancelAtEnd; private String user; public AbstractDelegationTokenRenewerAppEvent(ApplicationId appId, - Credentials credentails, boolean shouldCancelAtEnd, String user, - DelegationTokenRenewerEventType type) { + Credentials credentials, boolean shouldCancelAtEnd, String user, + DelegationTokenRenewerEventType type, Configuration tokenConf) { super(appId, type); - this.credentials = credentails; + this.credentials = credentials; this.shouldCancelAtEnd = shouldCancelAtEnd; this.user = user; + this.tokenConf = tokenConf; } public Credentials getCredentials() { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java index 02d39560bcb..7d19dab398f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockRM.java @@ -21,6 +21,7 @@ import java.io.IOException; import java.nio.ByteBuffer; import java.security.PrivilegedAction; +import java.security.PrivilegedExceptionAction; import java.util.Arrays; import java.util.Collection; import java.util.EnumSet; @@ -563,7 +564,7 @@ public RMApp submitApp(int masterMemory, String name, String user, return submitApp(resource, name, user, acls, false, queue, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, false, - false, null, 0, null, true, priority, amLabel, null); + false, null, 0, null, true, priority, amLabel, null, null); } public RMApp submitApp(Resource resource, String name, String user, @@ -664,7 +665,17 @@ public RMApp submitApp(Resource capability, String name, String user, return submitApp(capability, name, user, acls, unmanaged, queue, maxAppAttempts, ts, appType, waitForAccepted, keepContainers, isAppIdProvided, applicationId, attemptFailuresValidityInterval, - logAggregationContext, cancelTokensWhenComplete, priority, "", null); + logAggregationContext, cancelTokensWhenComplete, priority, "", null, + null); + } + + public RMApp submitApp(Credentials cred, ByteBuffer tokensConf) + throws Exception { + return submitApp(Resource.newInstance(200, 1), "app1", "user", null, false, + null, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, + YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), cred, null, true, + false, false, null, 0, null, true, Priority.newInstance(0), null, null, + tokensConf); } public RMApp submitApp(Resource capability, String name, String user, @@ -674,7 +685,8 @@ public RMApp submitApp(Resource capability, String name, String user, ApplicationId applicationId, long attemptFailuresValidityInterval, LogAggregationContext logAggregationContext, boolean cancelTokensWhenComplete, Priority priority, String amLabel, - Map applicationTimeouts) + Map applicationTimeouts, + ByteBuffer tokensConf) throws Exception { ApplicationId appId = isAppIdProvided ? 
applicationId : null; ApplicationClientProtocol client = getClientRMService(); @@ -713,6 +725,7 @@ public RMApp submitApp(Resource capability, String name, String user, ts.writeTokenStorageToStream(dob); ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); clc.setTokens(securityTokens); + clc.setTokensConf(tokensConf); } sub.setAMContainerSpec(clc); sub.setAttemptFailuresValidityInterval(attemptFailuresValidityInterval); @@ -729,22 +742,20 @@ public RMApp submitApp(Resource capability, String name, String user, req.setApplicationSubmissionContext(sub); UserGroupInformation fakeUser = UserGroupInformation.createUserForTesting(user, new String[] {"someGroup"}); - PrivilegedAction action = - new PrivilegedAction() { + PrivilegedExceptionAction action = + new PrivilegedExceptionAction() { ApplicationClientProtocol client; SubmitApplicationRequest req; @Override - public SubmitApplicationResponse run() { + public SubmitApplicationResponse run() throws IOException, YarnException { try { return client.submitApplication(req); - } catch (YarnException e) { - e.printStackTrace(); - } catch (IOException e) { + } catch (YarnException | IOException e) { e.printStackTrace(); + throw e; } - return null; } - PrivilegedAction setClientReq( + PrivilegedExceptionAction setClientReq( ApplicationClientProtocol client, SubmitApplicationRequest req) { this.client = client; this.req = req; @@ -1224,6 +1235,7 @@ public RMApp submitApp(int masterMemory, Priority priority, null, false, null, super.getConfig().getInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS, YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS), null, null, true, - false, false, null, 0, null, true, priority, null, applicationTimeouts); + false, false, null, 0, null, true, priority, null, applicationTimeouts, + null); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java index 03cc3a0a63d..892f8ba36ec 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java @@ -38,9 +38,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.security.AccessControlException; import org.apache.hadoop.security.Credentials; +import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.service.Service; import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.protocolrecords.SubmitApplicationRequest; @@ -259,6 +261,7 @@ public void setUp() { public void tearDown() { setAppEventType(RMAppEventType.KILL); ((Service)rmContext.getDispatcher()).stop(); + UserGroupInformation.reset(); } @Test @@ -311,13 +314,15 @@ public void testQueueSubmitWithNoPermission() throws IOException { ResourceRequest.ANY, Resource.newInstance(1024, 1), 1); sub.setAMContainerResourceRequest(resReg); req.setApplicationSubmissionContext(sub); + 
sub.setAMContainerSpec(mock(ContainerLaunchContext.class)); try { rmService.submitApplication(req); } catch (Exception e) { + e.printStackTrace(); if (e instanceof YarnException) { Assert.assertTrue(e.getCause() instanceof AccessControlException); } else { - Assert.fail("Yarn exception is expected"); + Assert.fail("Yarn exception is expected : " + e.getMessage()); } } finally { mockRM.close(); @@ -543,6 +548,10 @@ public void testRMAppSubmitWithInvalidTokens() throws Exception { DataOutputBuffer dob = new DataOutputBuffer(); ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + Configuration conf = new Configuration(); + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + UserGroupInformation.setConfiguration(conf); asContext.getAMContainerSpec().setTokens(securityTokens); try { appMonitor.submitApplication(asContext, "test"); @@ -564,36 +573,6 @@ public void testRMAppSubmitWithInvalidTokens() throws Exception { asContext.getAMContainerSpec().setTokens(null); } - @Test - public void testRMAppSubmitWithValidTokens() throws Exception { - // Setup valid security tokens - DataOutputBuffer dob = new DataOutputBuffer(); - Credentials credentials = new Credentials(); - credentials.writeTokenStorageToStream(dob); - ByteBuffer securityTokens = ByteBuffer.wrap(dob.getData(), 0, - dob.getLength()); - asContext.getAMContainerSpec().setTokens(securityTokens); - appMonitor.submitApplication(asContext, "test"); - RMApp app = rmContext.getRMApps().get(appId); - Assert.assertNotNull("app is null", app); - Assert.assertEquals("app id doesn't match", appId, - app.getApplicationId()); - Assert.assertEquals("app state doesn't match", RMAppState.NEW, - app.getState()); - verify(metricsPublisher).appACLsUpdated( - any(RMApp.class), any(String.class), anyLong()); - - // wait for event to be processed - int timeoutSecs = 0; - while ((getAppEventType() == RMAppEventType.KILL) && - timeoutSecs++ < 20) { - Thread.sleep(1000); - } - Assert.assertEquals("app event type sent is wrong", RMAppEventType.START, - getAppEventType()); - asContext.getAMContainerSpec().setTokens(null); - } - @Test (timeout = 30000) public void testRMAppSubmitMaxAppAttempts() throws Exception { int[] globalMaxAppAttempts = new int[] { 10, 1 }; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java index f0b093abf87..139e2dae111 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMRestart.java @@ -112,6 +112,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.TestUtils; import org.apache.hadoop.yarn.server.resourcemanager.scheduler.common.fica.FiCaSchedulerApp; +import org.apache.hadoop.yarn.server.resourcemanager.security.DelegationTokenRenewer; import org.apache.hadoop.yarn.server.security.ApplicationACLsManager; import org.apache.hadoop.yarn.server.timelineservice.collector.TimelineCollectorContext; 
import org.apache.hadoop.yarn.server.timelineservice.storage.FileSystemTimelineWriterImpl; @@ -1745,32 +1746,28 @@ public void testAppFailedOnSubmissionSavedInStateStore() throws Exception { memStore.init(conf); MockRM rm1 = new TestSecurityMockRM(conf, memStore) { - @Override - protected RMAppManager createRMAppManager() { - return new TestRMAppManager(this.rmContext, this.scheduler, - this.masterService, this.applicationACLsManager, conf); + class TestDelegationTokenRenewer extends DelegationTokenRenewer { + public void addApplicationAsync(ApplicationId applicationId, Credentials ts, + boolean shouldCancelAtEnd, String user, Configuration appConf) { + throw new RuntimeException("failed to submit app"); + } } - - class TestRMAppManager extends RMAppManager { - - public TestRMAppManager(RMContext context, YarnScheduler scheduler, - ApplicationMasterService masterService, - ApplicationACLsManager applicationACLsManager, Configuration conf) { - super(context, scheduler, masterService, applicationACLsManager, conf); - } - - @Override - protected Credentials parseCredentials( - ApplicationSubmissionContext application) throws IOException { - throw new IOException("Parsing credential error."); - } + @Override + protected DelegationTokenRenewer createDelegationTokenRenewer() { + return new TestDelegationTokenRenewer(); } }; rm1.start(); - RMApp app1 = - rm1.submitApp(200, "name", "user", + RMApp app1 = null; + try { + app1 = rm1.submitApp(200, "name", "user", new HashMap(), false, "default", -1, null, "MAPREDUCE", false); + Assert.fail(); + } catch (Exception e) { + + } + app1 = rm1.getRMContext().getRMApps().values().iterator().next(); rm1.waitForState(app1.getApplicationId(), RMAppState.FAILED); // Check app staet is saved in state store. Assert.assertEquals(RMAppState.FAILED, memStore.getState() diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java index 7dafc220979..55a4eac4eea 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/logaggregationstatus/TestRMAppLogAggregationStatus.java @@ -30,6 +30,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.LogAggregationStatus; import org.apache.hadoop.yarn.api.records.NodeId; @@ -490,9 +491,10 @@ public void testGetLogAggregationStatusForAppReport() { private RMApp createRMApp(Configuration conf) { ApplicationSubmissionContext submissionContext = - ApplicationSubmissionContext.newInstance(appId, "test", "default", - Priority.newInstance(0), null, false, true, - 2, Resource.newInstance(10, 2), "test"); + ApplicationSubmissionContext + .newInstance(appId, "test", "default", 
Priority.newInstance(0), + mock(ContainerLaunchContext.class), false, true, 2, + Resource.newInstance(10, 2), "test"); return new RMAppImpl(this.appId, this.rmContext, conf, "test", "test", "default", submissionContext, scheduler, diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index c4459ad67c0..488485112cb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -268,7 +268,7 @@ protected RMApp createNewTestApp(ApplicationSubmissionContext submissionContext) // but applicationId is still set for safety submissionContext.setApplicationId(applicationId); submissionContext.setPriority(Priority.newInstance(0)); - + submissionContext.setAMContainerSpec(mock(ContainerLaunchContext.class)); RMApp application = new RMAppImpl(applicationId, rmContext, conf, name, user, queue, submissionContext, scheduler, masterService, System.currentTimeMillis(), "YARN", null, mock(ResourceRequest.class)); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java index 8e8267fe611..8fa2e65046b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerTestBase.java @@ -23,6 +23,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; @@ -244,10 +245,10 @@ protected void createApplicationWithAMResource(ApplicationAttemptId attId, String queue, String user, Resource amResource) { RMContext rmContext = resourceManager.getRMContext(); ApplicationId appId = attId.getApplicationId(); - RMApp rmApp = new RMAppImpl(appId, rmContext, conf, - null, user, null, ApplicationSubmissionContext.newInstance(appId, null, - queue, null, null, false, false, 0, amResource, null), scheduler, null, - 0, null, null, null); + RMApp rmApp = new RMAppImpl(appId, rmContext, conf, null, user, null, + ApplicationSubmissionContext.newInstance(appId, null, queue, null, + mock(ContainerLaunchContext.class), false, false, 0, amResource, + null), scheduler, null, 0, null, null, null); rmContext.getRMApps().put(appId, 
rmApp); RMAppEvent event = new RMAppEvent(appId, RMAppEventType.START); resourceManager.getRMContext().getRMApps().get(appId).handle(event); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java index 205188b2c02..0190db68dc5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestDelegationTokenRenewer.java @@ -57,6 +57,7 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager; import org.apache.hadoop.hdfs.server.namenode.FSNamesystem; import org.apache.hadoop.io.DataInputByteBuffer; +import org.apache.hadoop.io.DataOutputBuffer; import org.apache.hadoop.io.Text; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; @@ -392,7 +393,8 @@ public void testDTRenewal () throws Exception { // register the tokens for renewal ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0); - delegationTokenRenewer.addApplicationAsync(applicationId_0, ts, true, "user"); + delegationTokenRenewer.addApplicationAsync(applicationId_0, ts, true, "user", + new Configuration()); waitForEventsToGetProcessed(delegationTokenRenewer); // first 3 initial renewals + 1 real @@ -432,7 +434,8 @@ public void testDTRenewal () throws Exception { ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1); - delegationTokenRenewer.addApplicationAsync(applicationId_1, ts, true, "user"); + delegationTokenRenewer.addApplicationAsync(applicationId_1, ts, true, "user", + new Configuration()); waitForEventsToGetProcessed(delegationTokenRenewer); delegationTokenRenewer.applicationFinished(applicationId_1); waitForEventsToGetProcessed(delegationTokenRenewer); @@ -468,7 +471,8 @@ public void testAppRejectionWithCancelledDelegationToken() throws Exception { // register the tokens for renewal ApplicationId appId = BuilderUtils.newApplicationId(0, 0); - delegationTokenRenewer.addApplicationAsync(appId, ts, true, "user"); + delegationTokenRenewer.addApplicationAsync(appId, ts, true, "user", + new Configuration()); int waitCnt = 20; while (waitCnt-- >0) { if (!eventQueue.isEmpty()) { @@ -531,7 +535,8 @@ public void testDTRenewalWithNoCancel () throws Exception { ApplicationId applicationId_1 = BuilderUtils.newApplicationId(0, 1); - delegationTokenRenewer.addApplicationAsync(applicationId_1, ts, false, "user"); + delegationTokenRenewer.addApplicationAsync(applicationId_1, ts, false, "user", + new Configuration()); waitForEventsToGetProcessed(delegationTokenRenewer); delegationTokenRenewer.applicationFinished(applicationId_1); waitForEventsToGetProcessed(delegationTokenRenewer); @@ -600,7 +605,8 @@ public void testDTKeepAlive1 () throws Exception { // register the tokens for renewal ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0); - localDtr.addApplicationAsync(applicationId_0, ts, true, "user"); + localDtr.addApplicationAsync(applicationId_0, ts, true, "user", + new Configuration()); 
waitForEventsToGetProcessed(localDtr); if (!eventQueue.isEmpty()){ Event evt = eventQueue.take(); @@ -679,7 +685,8 @@ public void testDTKeepAlive2() throws Exception { // register the tokens for renewal ApplicationId applicationId_0 = BuilderUtils.newApplicationId(0, 0); - localDtr.addApplicationAsync(applicationId_0, ts, true, "user"); + localDtr.addApplicationAsync(applicationId_0, ts, true, "user", + new Configuration()); localDtr.applicationFinished(applicationId_0); waitForEventsToGetProcessed(delegationTokenRenewer); //Send another keep alive. @@ -831,14 +838,16 @@ public Long answer(InvocationOnMock invocation) Thread submitThread = new Thread() { @Override public void run() { - dtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user"); + dtr.addApplicationAsync(mock(ApplicationId.class), creds1, false, "user", + new Configuration()); } }; submitThread.start(); // wait till 1st submit blocks, then submit another startBarrier.await(); - dtr.addApplicationAsync(mock(ApplicationId.class), creds2, false, "user"); + dtr.addApplicationAsync(mock(ApplicationId.class), creds2, false, "user", + new Configuration()); // signal 1st to complete endBarrier.await(); submitThread.join(); @@ -1273,4 +1282,106 @@ public Boolean get() { } }, 10, 10000); } + + // Test DelegationTokenRenewer uses the tokenConf provided by application + // for token renewal. + @Test + public void testRenewTokenUsingTokenConfProvidedByApp() throws Exception{ + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + UserGroupInformation.setConfiguration(conf); + + MockRM rm = new TestSecurityMockRM(conf, null); + rm.start(); + final MockNM nm1 = + new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService()); + nm1.registerNode(); + + // create a token + Text userText1 = new Text("user1"); + DelegationTokenIdentifier dtId1 = + new DelegationTokenIdentifier(userText1, new Text("renewer1"), + userText1); + final Token token1 = + new Token(dtId1.getBytes(), + "password1".getBytes(), dtId1.getKind(), new Text("service1")); + Credentials credentials = new Credentials(); + credentials.addToken(userText1, token1); + + // create token conf for renewal + Configuration appConf = new Configuration(false); + appConf.set("dfs.nameservices", "mycluster1,mycluster2"); + appConf.set("dfs.namenode.rpc-address.mycluster2.nn1", "123.0.0.1"); + appConf.set("dfs.namenode.rpc-address.mycluster2.nn2", "123.0.0.2"); + appConf.set("dfs.ha.namenodes.mycluster2", "nn1,nn2"); + appConf.set("dfs.client.failover.proxy.provider.mycluster2", "provider"); + DataOutputBuffer dob = new DataOutputBuffer(); + appConf.write(dob); + ByteBuffer tokenConf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + final int confSize = appConf.size(); + + // submit app + RMApp app = rm.submitApp(credentials, tokenConf); + + GenericTestUtils.waitFor(new Supplier() { + public Boolean get() { + DelegationTokenToRenew toRenew = + rm.getRMContext().getDelegationTokenRenewer().getAllTokens() + .get(token1); + // check app conf size equals to original size and it does contain + // the specific config we added. 
+ return toRenew != null && toRenew.conf != null + && toRenew.conf.size() == confSize && toRenew.conf + .get("dfs.namenode.rpc-address.mycluster2.nn1").equals("123.0.0.1"); + } + }, 200, 10000); + } + + // Test if app's token conf exceeds RM_DELEGATION_TOKEN_MAX_CONF_SIZE, + // app should fail + @Test + public void testTokensConfExceedLimit() throws Exception { + conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION, + "kerberos"); + UserGroupInformation.setConfiguration(conf); + // limit 100 bytes + conf.setInt(YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE, 100); + MockRM rm = new TestSecurityMockRM(conf, null); + rm.start(); + final MockNM nm1 = + new MockNM("127.0.0.1:1234", 15120, rm.getResourceTrackerService()); + nm1.registerNode(); + + // create a token + Text userText1 = new Text("user1"); + DelegationTokenIdentifier dtId1 = + new DelegationTokenIdentifier(userText1, new Text("renewer1"), + userText1); + final Token token1 = + new Token(dtId1.getBytes(), + "password1".getBytes(), dtId1.getKind(), new Text("service1")); + Credentials credentials = new Credentials(); + credentials.addToken(userText1, token1); + + // create token conf for renewal, total size (512 bytes) > limit (100 bytes) + // By experiment, it's roughly 128 bytes per key-value pair. + Configuration appConf = new Configuration(false); + appConf.clear(); + appConf.set("dfs.nameservices", "mycluster1,mycluster2"); // 128 bytes + appConf.set("dfs.namenode.rpc-address.mycluster2.nn1", "123.0.0.1"); //128 bytes + appConf.set("dfs.namenode.rpc-address.mycluster3.nn2", "123.0.0.2"); // 128 bytes + + DataOutputBuffer dob = new DataOutputBuffer(); + appConf.write(dob); + ByteBuffer tokenConf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength()); + + try { + rm.submitApp(credentials, tokenConf); + Assert.fail(); + } catch (Exception e) { + e.printStackTrace(); + Assert.assertTrue(e.getCause().getMessage() + .contains(YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE)); + } + } }
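
Usage notes (not part of the patch). First, a minimal client-side sketch of the cross-cluster scenario that the mapred-default.xml description refers to: a job reads from a remote HA HDFS cluster the RM has no configuration for, so the submitter whitelists the relevant dfs.* keys via mapreduce.job.send-token-conf and YARNRunner ships every matching key to the RM for delegation-token renewal. The cluster names, hostnames, and wrapper class below are hypothetical; the regex is the minimum set suggested in mapred-default.xml.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.MRJobConfig;

// Hypothetical client-side setup for a DistCp-style job that reads from a
// remote HA HDFS cluster ("mycluster2") whose settings the RM does not have.
public class CrossClusterJobConfExample {
  public static JobConf buildJobConf() {
    JobConf jobConf = new JobConf();

    // Remote-cluster HDFS client settings the RM needs in order to renew that
    // cluster's delegation tokens (names and addresses are made up).
    jobConf.set("dfs.nameservices", "mycluster1,mycluster2");
    jobConf.set("dfs.ha.namenodes.mycluster2", "nn1,nn2");
    jobConf.set("dfs.namenode.rpc-address.mycluster2.nn1",
        "nn1.remote.example.com:8020");
    jobConf.set("dfs.namenode.rpc-address.mycluster2.nn2",
        "nn2.remote.example.com:8020");
    jobConf.set("dfs.client.failover.proxy.provider.mycluster2",
        "org.apache.hadoop.hdfs.server.namenode.ha."
            + "ConfiguredFailoverProxyProvider");

    // Only keys matching this regex are copied into the ContainerLaunchContext
    // by YARNRunner#setTokenRenewerConf and sent to the RM.
    jobConf.set(MRJobConfig.MR_JOB_SEND_TOKEN_CONF,
        "dfs.nameservices|^dfs.namenode.rpc-address.*$|^dfs.ha.namenodes.*$"
            + "|^dfs.client.failover.proxy.provider.*$"
            + "|dfs.namenode.kerberos.principal");
    return jobConf;
  }
}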
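
For a non-MapReduce YARN application, the same bytes can be attached directly to the ContainerLaunchContext through the new setTokensConf() API. The helper below is an illustration only (the class and method names are not part of the patch); it mirrors YARNRunner#setTokenRenewerConf and adds a client-side check against yarn.resourcemanager.delegation-token.max-conf-size-bytes, since ClientRMService rejects submissions whose serialized conf exceeds that limit. It assumes the client sees the same limit value that the RM is running with.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

// Illustrative helper (not part of the patch): copies the keys matching
// `regex` into a fresh Configuration, serializes it, and attaches it to the
// ContainerLaunchContext for RM-side token renewal.
public final class TokenConfHelper {
  private TokenConfHelper() {
  }

  public static void attachTokensConf(ContainerLaunchContext clc,
      Configuration conf, String regex) throws IOException {
    Configuration copy = new Configuration(false);
    for (Map.Entry<String, String> entry : conf) {
      if (entry.getKey().matches(regex)) {
        copy.set(entry.getKey(), entry.getValue());
      }
    }

    DataOutputBuffer dob = new DataOutputBuffer();
    copy.write(dob);  // Configuration is Writable

    // ClientRMService rejects submissions whose serialized token conf exceeds
    // yarn.resourcemanager.delegation-token.max-conf-size-bytes (12800 by
    // default, roughly 100 key/value pairs), so fail fast on the client,
    // assuming the client-side value matches the RM's.
    int maxSize = conf.getInt(
        YarnConfiguration.RM_DELEGATION_TOKEN_MAX_CONF_SIZE,
        YarnConfiguration.DEFAULT_RM_DELEGATION_TOKEN_MAX_CONF_SIZE_BYTES);
    if (dob.getLength() > maxSize) {
      throw new IOException("Serialized token conf is " + dob.getLength()
          + " bytes, which exceeds the RM limit of " + maxSize + " bytes");
    }

    clc.setTokensConf(ByteBuffer.wrap(dob.getData(), 0, dob.getLength()));
  }
}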
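
Finally, a self-contained round-trip of the serialization path, useful for estimating payload size against the roughly-128-bytes-per-entry figure quoted in yarn-default.xml: the client writes a stripped-down Configuration, and the RM reads the same bytes back (via BuilderUtils#parseTokensConf) before handing them to DelegationTokenRenewer in place of its own configuration. The class name and values are hypothetical.

import java.nio.ByteBuffer;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.DataInputByteBuffer;
import org.apache.hadoop.io.DataOutputBuffer;

// Standalone demonstration of the write/read path the patch relies on.
public class TokensConfRoundTrip {
  public static void main(String[] args) throws Exception {
    Configuration appConf = new Configuration(false);
    appConf.set("dfs.nameservices", "mycluster1,mycluster2");
    appConf.set("dfs.ha.namenodes.mycluster2", "nn1,nn2");

    // Client side: serialize the filtered conf into the buffer that would be
    // placed on the ContainerLaunchContext.
    DataOutputBuffer dob = new DataOutputBuffer();
    appConf.write(dob);
    ByteBuffer tokensConf = ByteBuffer.wrap(dob.getData(), 0, dob.getLength());
    System.out.println("serialized size = " + dob.getLength() + " bytes");

    // RM side: read the buffer back the way BuilderUtils#parseTokensConf does.
    DataInputByteBuffer dibb = new DataInputByteBuffer();
    dibb.reset(tokensConf);
    Configuration parsed = new Configuration(false);
    parsed.readFields(dibb);
    System.out.println("dfs.ha.namenodes.mycluster2 = "
        + parsed.get("dfs.ha.namenodes.mycluster2"));
  }
}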