diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh index 3800824a82c..4653efdbda9 100755 --- a/dev-support/test-patch.sh +++ b/dev-support/test-patch.sh @@ -980,12 +980,12 @@ fi (( RESULT = RESULT + $JAVAC_RET )) checkJavadocWarnings (( RESULT = RESULT + $? )) -checkEclipseGeneration -(( RESULT = RESULT + $? )) ### Checkstyle not implemented yet #checkStyle #(( RESULT = RESULT + $? )) buildAndInstall +checkEclipseGeneration +(( RESULT = RESULT + $? )) checkFindbugsWarnings (( RESULT = RESULT + $? )) checkReleaseAuditWarnings diff --git a/hadoop-common-project/hadoop-common/CHANGES.txt b/hadoop-common-project/hadoop-common/CHANGES.txt index 2ca826bfb99..0c66e9d4359 100644 --- a/hadoop-common-project/hadoop-common/CHANGES.txt +++ b/hadoop-common-project/hadoop-common/CHANGES.txt @@ -146,6 +146,9 @@ Trunk (Unreleased) HADOOP-9162. Add utility to check native library availability. (Binglin Chang via suresh) + HADOOP-8924. Add maven plugin alternative to shell script to save + package-info.java. (Chris Nauroth via suresh) + BUG FIXES HADOOP-8419. Fixed GzipCode NPE reset for IBM JDK. (Yu Li via eyang) @@ -308,10 +311,13 @@ Trunk (Unreleased) HADOOP-9131. Turn off TestLocalFileSystem#testListStatusWithColons on Windows. (Chris Nauroth via suresh) - HADOOP-8957 AbstractFileSystem#IsValidName should be overridden for + HADOOP-8957. AbstractFileSystem#IsValidName should be overridden for embedded file systems like ViewFs (Chris Nauroth via Sanjay Radia) - HADOOP-9139 improve killKdc.sh (Ivan A. Veselovsky via bobby) + HADOOP-9139. improve killKdc.sh (Ivan A. Veselovsky via bobby) + + HADOOP-9202. test-patch.sh fails during mvn eclipse:eclipse if patch adds + a new module to the build (Chris Nauroth via bobby) OPTIMIZATIONS @@ -323,6 +329,8 @@ Release 2.0.3-alpha - Unreleased INCOMPATIBLE CHANGES + HADOOP-8999. SASL negotiation is flawed (daryn) + NEW FEATURES HADOOP-8597. Permit FsShell's text command to read Avro files. @@ -433,6 +441,18 @@ Release 2.0.3-alpha - Unreleased HADOOP-9192. Move token related request/response messages to common. (suresh) + HADOOP-8712. Change default hadoop.security.group.mapping to + JniBasedUnixGroupsNetgroupMappingWithFallback (Robert Parker via todd) + + HADOOP-9106. Allow configuration of IPC connect timeout. + (Rober Parker via suresh) + + HADOOP-9216. CompressionCodecFactory#getCodecClasses should trim the + result of parsing by Configuration. (Tsuyoshi Ozawa via todd) + + HADOOP-9231. Parametrize staging URL for the uniformity of + distributionManagement. (Konstantin Boudnik via suresh) + OPTIMIZATIONS HADOOP-8866. SampleQuantiles#query is O(N^2) instead of O(N). (Andrew Wang @@ -493,8 +513,6 @@ Release 2.0.3-alpha - Unreleased HADOOP-7115. Add a cache for getpwuid_r and getpwgid_r calls (tucu) - HADOOP-8999. SASL negotiation is flawed (daryn) - HADOOP-6607. Add different variants of non caching HTTP headers. (tucu) HADOOP-9049. DelegationTokenRenewer needs to be Singleton and FileSystems @@ -537,6 +555,23 @@ Release 2.0.3-alpha - Unreleased HADOOP-9183. Potential deadlock in ActiveStandbyElector. (tomwhite) + HADOOP-9203. RPCCallBenchmark should find a random available port. + (Andrew Purtell via suresh) + + HADOOP-9178. src/main/conf is missing hadoop-policy.xml. + (Sandy Ryza via eli) + + HADOOP-8816. HTTP Error 413 full HEAD if using kerberos authentication. + (moritzmoeller via tucu) + + HADOOP-9212. Potential deadlock in FileSystem.Cache/IPC/UGI. (tomwhite) + + HADOOP-9193. 
hadoop script can inadvertently expand wildcard arguments + when delegating to hdfs script. (Andy Isaacson via todd) + + HADOOP-9215. when using cmake-2.6, libhadoop.so doesn't get created + (only libhadoop.so.1.0.0) (Colin Patrick McCabe via todd) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES @@ -1227,6 +1262,21 @@ Release 2.0.0-alpha - 05-23-2012 HADOOP-8655. Fix TextInputFormat for large deliminators. (Gelesh via bobby) +Release 0.23.7 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + HADOOP-8849. FileUtil#fullyDelete should grant the target directories +rwx + permissions (Ivan A. Veselovsky via bobby) + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.6 - UNRELEASED INCOMPATIBLE CHANGES @@ -1234,6 +1284,8 @@ Release 0.23.6 - UNRELEASED NEW FEATURES IMPROVEMENTS + HADOOP-9217. Print thread dumps when hadoop-common tests fail. + (Andrey Klochkov via suresh) OPTIMIZATIONS @@ -1250,7 +1302,10 @@ Release 0.23.6 - UNRELEASED HADOOP-9105. FsShell -moveFromLocal erroneously fails (daryn via bobby) -Release 0.23.5 - UNRELEASED + HADOOP-9097. Maven RAT plugin is not checking all source files (tgraves) + +Release 0.23.5 - 2012-11-28 + INCOMPATIBLE CHANGES diff --git a/hadoop-common-project/hadoop-common/dev-support/saveVersion.sh b/hadoop-common-project/hadoop-common/dev-support/saveVersion.sh deleted file mode 100755 index d11a4cf75c2..00000000000 --- a/hadoop-common-project/hadoop-common/dev-support/saveVersion.sh +++ /dev/null @@ -1,67 +0,0 @@ -#!/bin/sh - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# This file is used to generate the package-info.java class that -# records the version, revision, branch, user, timestamp, and url -unset LANG -unset LC_CTYPE -unset LC_TIME -version=$1 -build_dir=$2 -user=`whoami | tr '\n\r' '\n'` -date=`date` -cwd=`pwd` -if git rev-parse HEAD 2>/dev/null > /dev/null ; then - revision=`git log -1 --pretty=format:"%H"` - hostname=`hostname` - branch=`git branch | sed -n -e 's/^* //p'` - url="git://${hostname}${cwd}" -elif [ -d .svn ]; then - revision=`svn info | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'` - url=`svn info | sed -n -e 's/^URL: \(.*\)/\1/p'` - # Get canonical branch (branches/X, tags/X, or trunk) - branch=`echo $url | sed -n -e 's,.*\(branches/.*\)$,\1,p' \ - -e 's,.*\(tags/.*\)$,\1,p' \ - -e 's,.*trunk$,trunk,p'` -else - revision="Unknown" - branch="Unknown" - url="file://$cwd" -fi - -which md5sum > /dev/null -if [ "$?" 
= "0" ] ; then - srcChecksum=`find src/main/java -name '*.java' | LC_ALL=C sort | xargs md5sum | md5sum | cut -d ' ' -f 1` -else - srcChecksum="Not Available" -fi - -mkdir -p $build_dir/org/apache/hadoop -cat << EOF | \ - sed -e "s/VERSION/$version/" -e "s/USER/$user/" -e "s/DATE/$date/" \ - -e "s|URL|$url|" -e "s/REV/$revision/" \ - -e "s|BRANCH|$branch|" -e "s/SRCCHECKSUM/$srcChecksum/" \ - > $build_dir/org/apache/hadoop/package-info.java -/* - * Generated by src/saveVersion.sh - */ -@HadoopVersionAnnotation(version="VERSION", revision="REV", branch="BRANCH", - user="USER", date="DATE", url="URL", - srcChecksum="SRCCHECKSUM") -package org.apache.hadoop; -EOF diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml index a4f7ceabc00..06420f2e6f7 100644 --- a/hadoop-common-project/hadoop-common/pom.xml +++ b/hadoop-common-project/hadoop-common/pom.xml @@ -244,7 +244,51 @@ + + + + ${basedir}/src/main/resources + + common-version-info.properties + + false + + + ${basedir}/src/main/resources + + common-version-info.properties + + true + + + + org.apache.hadoop + hadoop-maven-plugins + + + version-info + + version-info + + + + ${basedir}/src/main + + java/**/*.java + proto/**/*.proto + + + + + + org.apache.maven.plugins maven-surefire-plugin @@ -288,22 +332,6 @@ - - save-version - generate-sources - - run - - - - - - - - - - generate-test-sources generate-test-sources @@ -445,13 +473,26 @@ dev-support/jdiff/** src/main/native/* src/main/native/config/* - src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo src/main/native/m4/* src/test/empty-file src/test/all-tests + src/test/resources/kdc/ldif/users.ldif + src/main/native/src/org/apache/hadoop/io/compress/lz4/lz4.c + + org.apache.maven.plugins + maven-surefire-plugin + + + + listener + org.apache.hadoop.test.TimedOutTestsListener + + + + @@ -513,6 +554,9 @@ + + diff --git a/hadoop-common-project/hadoop-common/src/config.h.cmake b/hadoop-common-project/hadoop-common/src/config.h.cmake index 7423de73a82..e720d306570 100644 --- a/hadoop-common-project/hadoop-common/src/config.h.cmake +++ b/hadoop-common-project/hadoop-common/src/config.h.cmake @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ #ifndef CONFIG_H #define CONFIG_H diff --git a/hadoop-common-project/hadoop-common/src/main/bin/hadoop b/hadoop-common-project/hadoop-common/src/main/bin/hadoop index 17b41f77bef..8ed9b3f3466 100755 --- a/hadoop-common-project/hadoop-common/src/main/bin/hadoop +++ b/hadoop-common-project/hadoop-common/src/main/bin/hadoop @@ -58,9 +58,9 @@ case $COMMAND in #try to locate hdfs and if present, delegate to it. 
shift if [ -f "${HADOOP_HDFS_HOME}"/bin/hdfs ]; then - exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups} $* + exec "${HADOOP_HDFS_HOME}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@" elif [ -f "${HADOOP_PREFIX}"/bin/hdfs ]; then - exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} $* + exec "${HADOOP_PREFIX}"/bin/hdfs ${COMMAND/dfsgroups/groups} "$@" else echo "HADOOP_HDFS_HOME not found!" exit 1 @@ -75,9 +75,9 @@ case $COMMAND in #try to locate mapred and if present, delegate to it. shift if [ -f "${HADOOP_MAPRED_HOME}"/bin/mapred ]; then - exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} $* + exec "${HADOOP_MAPRED_HOME}"/bin/mapred ${COMMAND/mrgroups/groups} "$@" elif [ -f "${HADOOP_PREFIX}"/bin/mapred ]; then - exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} $* + exec "${HADOOP_PREFIX}"/bin/mapred ${COMMAND/mrgroups/groups} "$@" else echo "HADOOP_MAPRED_HOME not found!" exit 1 diff --git a/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml new file mode 100644 index 00000000000..4dd5b9fbb02 --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/conf/hadoop-policy.xml @@ -0,0 +1,219 @@ + + + + + + + + + security.client.protocol.acl + * + ACL for ClientProtocol, which is used by user code + via the DistributedFileSystem. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.client.datanode.protocol.acl + * + ACL for ClientDatanodeProtocol, the client-to-datanode protocol + for block recovery. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.datanode.protocol.acl + * + ACL for DatanodeProtocol, which is used by datanodes to + communicate with the namenode. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.inter.datanode.protocol.acl + * + ACL for InterDatanodeProtocol, the inter-datanode protocol + for updating generation timestamp. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.namenode.protocol.acl + * + ACL for NamenodeProtocol, the protocol used by the secondary + namenode to communicate with the namenode. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.admin.operations.protocol.acl + ${HADOOP_HDFS_USER} + ACL for AdminOperationsProtocol. Used for admin commands. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.refresh.usertogroups.mappings.protocol.acl + ${HADOOP_HDFS_USER} + ACL for RefreshUserMappingsProtocol. Used to refresh + users mappings. The ACL is a comma-separated list of user and + group names. The user and group list is separated by a blank. 
For + e.g. "alice,bob users,wheel". A special value of "*" means all + users are allowed. + + + + security.refresh.policy.protocol.acl + ${HADOOP_HDFS_USER} + ACL for RefreshAuthorizationPolicyProtocol, used by the + dfsadmin and mradmin commands to refresh the security policy in-effect. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.ha.service.protocol.acl + * + ACL for HAService protocol used by HAAdmin to manage the + active and stand-by states of namenode. + + + + security.zkfc.protocol.acl + * + ACL for access to the ZK Failover Controller + + + + + security.qjournal.service.protocol.acl + ${HADOOP_HDFS_USER} + ACL for QJournalProtocol, used by the NN to communicate with + JNs when using the QuorumJournalManager for edit logs. + + + + security.mrhs.client.protocol.acl + * + ACL for HSClientProtocol, used by job clients to + communciate with the MR History Server job status etc. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + + + security.resourcetracker.protocol.acl + ${HADOOP_YARN_USER} + ACL for ResourceTracker protocol, used by the + ResourceManager and NodeManager to communicate with each other. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.admin.protocol.acl + ${HADOOP_YARN_USER} + ACL for RMAdminProtocol, for admin commands. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.client.resourcemanager.protocol.acl + * + ACL for ClientRMProtocol, used by the ResourceManager + and applications submission clients to communicate with each other. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.applicationmaster.resourcemanager.protocol.acl + * + ACL for AMRMProtocol, used by the ResourceManager + and ApplicationMasters to communicate with each other. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.containermanager.protocol.acl + * + ACL for ContainerManager protocol, used by the NodeManager + and ApplicationMasters to communicate with each other. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.resourcelocalizer.protocol.acl + * + ACL for ResourceLocalizer protocol, used by the NodeManager + and ResourceLocalizer to communicate with each other. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. 
+ + + + security.job.task.protocol.acl + * + ACL for TaskUmbilicalProtocol, used by the map and reduce + tasks to communicate with the parent tasktracker. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + + security.job.client.protocol.acl + * + ACL for MRClientProtocol, used by job clients to + communciate with the MR ApplicationMaster to query job status etc. + The ACL is a comma-separated list of user and group names. The user and + group list is separated by a blank. For e.g. "alice,bob users,wheel". + A special value of "*" means all users are allowed. + + + diff --git a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml index 771ac052b33..6716c486025 100644 --- a/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml +++ b/hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml @@ -116,22 +116,6 @@ ACL for NamenodeProtocol, the protocol used by the secondary namenode to communicate with the namenode. - - security.inter.tracker.protocol.acl - ACL for InterTrackerProtocol, used by the tasktrackers to - communicate with the jobtracker. - - - security.job.submission.protocol.acl - ACL for JobSubmissionProtocol, used by job clients to - communciate with the jobtracker for job submission, querying job status - etc. - - - security.task.umbilical.protocol.acl - ACL for TaskUmbilicalProtocol, used by the map and reduce - tasks to communicate with the parent tasktracker. - security.refresh.policy.protocol.acl ACL for RefreshAuthorizationPolicyProtocol, used by the diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/HadoopVersionAnnotation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/HadoopVersionAnnotation.java deleted file mode 100644 index 132210f1a9f..00000000000 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/HadoopVersionAnnotation.java +++ /dev/null @@ -1,74 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop; - -import java.lang.annotation.*; - -import org.apache.hadoop.classification.InterfaceAudience; -import org.apache.hadoop.classification.InterfaceStability; - -/** - * A package attribute that captures the version of Hadoop that was compiled. 
- */ -@Retention(RetentionPolicy.RUNTIME) -@Target(ElementType.PACKAGE) -@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"}) -@InterfaceStability.Unstable -public @interface HadoopVersionAnnotation { - - /** - * Get the Hadoop version - * @return the version string "0.6.3-dev" - */ - String version(); - - /** - * Get the username that compiled Hadoop. - */ - String user(); - - /** - * Get the date when Hadoop was compiled. - * @return the date in unix 'date' format - */ - String date(); - - /** - * Get the url for the subversion repository. - */ - String url(); - - /** - * Get the subversion revision. - * @return the revision number as a string (eg. "451451") - */ - String revision(); - - /** - * Get the branch from which this was compiled. - * @return The branch name, e.g. "trunk" or "branches/branch-0.20" - */ - String branch(); - - /** - * Get a checksum of the source files from which - * Hadoop was compiled. - * @return a string that uniquely identifies the source - **/ - String srcChecksum(); -} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java index a7579a96406..daa57af2c9c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java @@ -21,6 +21,7 @@ package org.apache.hadoop.fs; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.http.lib.StaticUserWebFilter; +import org.apache.hadoop.security.authorize.Service; /** * This class contains constants for configuration keys used @@ -114,7 +115,18 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl"; public static final String SECURITY_ZKFC_PROTOCOL_ACL = "security.zkfc.protocol.acl"; - + public static final String + SECURITY_CLIENT_PROTOCOL_ACL = "security.client.protocol.acl"; + public static final String SECURITY_CLIENT_DATANODE_PROTOCOL_ACL = + "security.client.datanode.protocol.acl"; + public static final String + SECURITY_DATANODE_PROTOCOL_ACL = "security.datanode.protocol.acl"; + public static final String + SECURITY_INTER_DATANODE_PROTOCOL_ACL = "security.inter.datanode.protocol.acl"; + public static final String + SECURITY_NAMENODE_PROTOCOL_ACL = "security.namenode.protocol.acl"; + public static final String SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL = + "security.qjournal.service.protocol.acl"; public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP = "hadoop.security.token.service.use_ip"; public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT = @@ -191,4 +203,4 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic { public static final long HADOOP_SECURITY_UID_NAME_CACHE_TIMEOUT_DEFAULT = 4*60*60; // 4 hours -} \ No newline at end of file +} diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java index 5dc5e1a1c8c..3a236cbc278 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java +++ 
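
The hunk above introduces constants in CommonConfigurationKeys that mirror the ACL property names shipped in the new hadoop-policy.xml. A minimal sketch of reading one of those ACLs, assuming hadoop-policy.xml is available on the classpath (otherwise the "*" default is returned):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class AclConfigExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // hadoop-policy.xml is loaded by name from the classpath; this is an
        // assumption of the sketch, not something the patch itself does.
        conf.addResource("hadoop-policy.xml");
        String acl =
            conf.get(CommonConfigurationKeys.SECURITY_CLIENT_PROTOCOL_ACL, "*");
        System.out.println("security.client.protocol.acl = " + acl);
      }
    }
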
b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java @@ -173,6 +173,11 @@ public class CommonConfigurationKeysPublic { /** Default value for IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY */ public static final int IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT = 10000; // 10s /** See core-default.xml */ + public static final String IPC_CLIENT_CONNECT_TIMEOUT_KEY = + "ipc.client.connect.timeout"; + /** Default value for IPC_CLIENT_CONNECT_TIMEOUT_KEY */ + public static final int IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT = 20000; // 20s + /** See core-default.xml */ public static final String IPC_CLIENT_CONNECT_MAX_RETRIES_KEY = "ipc.client.connect.max.retries"; /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */ diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java index b6a2acae491..4593eedb9fb 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileUtil.java @@ -87,33 +87,98 @@ public class FileUtil { * (4) If dir is a normal directory, then dir and all its contents recursively * are deleted. */ - public static boolean fullyDelete(File dir) { - if (dir.delete()) { + public static boolean fullyDelete(final File dir) { + return fullyDelete(dir, false); + } + + /** + * Delete a directory and all its contents. If + * we return false, the directory may be partially-deleted. + * (1) If dir is symlink to a file, the symlink is deleted. The file pointed + * to by the symlink is not deleted. + * (2) If dir is symlink to a directory, symlink is deleted. The directory + * pointed to by symlink is not deleted. + * (3) If dir is a normal file, it is deleted. + * (4) If dir is a normal directory, then dir and all its contents recursively + * are deleted. + * @param dir the file or directory to be deleted + * @param tryGrantPermissions true if permissions should be modified to delete a file. + * @return true on success false on failure. + */ + public static boolean fullyDelete(final File dir, boolean tryGrantPermissions) { + if (tryGrantPermissions) { + // try to chmod +rwx the parent folder of the 'dir': + File parent = dir.getParentFile(); + grantPermissions(parent); + } + if (deleteImpl(dir, false)) { // dir is (a) normal file, (b) symlink to a file, (c) empty directory or // (d) symlink to a directory return true; } - // handle nonempty directory deletion - if (!fullyDeleteContents(dir)) { + if (!fullyDeleteContents(dir, tryGrantPermissions)) { return false; } - return dir.delete(); + return deleteImpl(dir, true); + } + + /* + * Pure-Java implementation of "chmod +rwx f". + */ + private static void grantPermissions(final File f) { + f.setExecutable(true); + f.setReadable(true); + f.setWritable(true); } + private static boolean deleteImpl(final File f, final boolean doLog) { + if (f == null) { + LOG.warn("null file argument."); + return false; + } + final boolean wasDeleted = f.delete(); + if (wasDeleted) { + return true; + } + final boolean ex = f.exists(); + if (doLog && ex) { + LOG.warn("Failed to delete file or dir [" + + f.getAbsolutePath() + "]: it still exists."); + } + return !ex; + } + /** * Delete the contents of a directory, not the directory itself. If * we return false, the directory may be partially-deleted. 
* If dir is a symlink to a directory, all the contents of the actual * directory pointed to by dir will be deleted. */ - public static boolean fullyDeleteContents(File dir) { + public static boolean fullyDeleteContents(final File dir) { + return fullyDeleteContents(dir, false); + } + + /** + * Delete the contents of a directory, not the directory itself. If + * we return false, the directory may be partially-deleted. + * If dir is a symlink to a directory, all the contents of the actual + * directory pointed to by dir will be deleted. + * @param tryGrantPermissions if 'true', try grant +rwx permissions to this + * and all the underlying directories before trying to delete their contents. + */ + public static boolean fullyDeleteContents(final File dir, final boolean tryGrantPermissions) { + if (tryGrantPermissions) { + // to be able to list the dir and delete files from it + // we must grant the dir rwx permissions: + grantPermissions(dir); + } boolean deletionSucceeded = true; - File contents[] = dir.listFiles(); + final File[] contents = dir.listFiles(); if (contents != null) { for (int i = 0; i < contents.length; i++) { if (contents[i].isFile()) { - if (!contents[i].delete()) {// normal file or symlink to another file + if (!deleteImpl(contents[i], true)) {// normal file or symlink to another file deletionSucceeded = false; continue; // continue deletion of other files/dirs under dir } @@ -121,16 +186,16 @@ public class FileUtil { // Either directory or symlink to another directory. // Try deleting the directory as this might be a symlink boolean b = false; - b = contents[i].delete(); + b = deleteImpl(contents[i], false); if (b){ //this was indeed a symlink or an empty directory continue; } // if not an empty directory or symlink let // fullydelete handle it. 
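
The new boolean overloads of fullyDelete and fullyDeleteContents shown above let callers ask FileUtil to grant rwx permissions on each directory before descending into it. A minimal usage sketch, assuming a scratch directory path of the caller's choosing:

    import java.io.File;
    import org.apache.hadoop.fs.FileUtil;

    public class FullyDeleteExample {
      public static void main(String[] args) {
        // Hypothetical path; any tree containing non-writable entries works.
        File dir = new File("/tmp/scratch-dir");
        // tryGrantPermissions=true grants rwx on the parent and on each
        // directory encountered, so read-only subtrees can still be removed.
        boolean deleted = FileUtil.fullyDelete(dir, true);
        if (!deleted) {
          System.err.println("Could not fully delete " + dir.getAbsolutePath());
        }
      }
    }
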
- if (!fullyDelete(contents[i])) { + if (!fullyDelete(contents[i], tryGrantPermissions)) { deletionSucceeded = false; - continue; // continue deletion of other files/dirs under dir + // continue deletion of other files/dirs under dir } } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java index c14ea99a88a..fc1d7c4717c 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java @@ -305,6 +305,7 @@ public class HttpServer implements FilterContainer { ret.setAcceptQueueSize(128); ret.setResolveNames(false); ret.setUseDirectBuffers(false); + ret.setHeaderBufferSize(1024*64); return ret; } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java index 57fb366bdd0..eb35759c9c8 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java @@ -122,7 +122,7 @@ public class CompressionCodecFactory { if (codecsString != null) { StringTokenizer codecSplit = new StringTokenizer(codecsString, ","); while (codecSplit.hasMoreElements()) { - String codecSubstring = codecSplit.nextToken(); + String codecSubstring = codecSplit.nextToken().trim(); if (codecSubstring.length() != 0) { try { Class cls = conf.getClassByName(codecSubstring); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java index f5376a33962..36ea776b777 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java @@ -106,6 +106,8 @@ public class Client { private SocketFactory socketFactory; // how to create sockets private int refCount = 1; + + private final int connectionTimeout; final static int PING_CALL_ID = -1; @@ -159,7 +161,16 @@ public class Client { } return -1; } - + /** + * set the connection timeout value in configuration + * + * @param conf Configuration + * @param timeout the socket connect timeout value + */ + public static final void setConnectTimeout(Configuration conf, int timeout) { + conf.setInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, timeout); + } + /** * Increment this client's reference count * @@ -494,8 +505,7 @@ public class Client { } } - // connection time out is 20s - NetUtils.connect(this.socket, server, 20000); + NetUtils.connect(this.socket, server, connectionTimeout); if (rpcTimeout > 0) { pingInterval = rpcTimeout; // rpcTimeout overwrites pingInterval } @@ -1034,6 +1044,8 @@ public class Client { this.valueClass = valueClass; this.conf = conf; this.socketFactory = factory; + this.connectionTimeout = conf.getInt(CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_KEY, + CommonConfigurationKeys.IPC_CLIENT_CONNECT_TIMEOUT_DEFAULT); } /** diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java index 
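
The Client change above replaces the hard-coded 20 s connect timeout with the ipc.client.connect.timeout setting and adds a setConnectTimeout helper. A short sketch of configuring it, assuming an otherwise default Configuration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.ipc.Client;

    public class ConnectTimeoutExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Both lines write the same key, "ipc.client.connect.timeout"
        // (default 20000 ms); the helper is just a convenience.
        conf.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_TIMEOUT_KEY, 5000);
        Client.setConnectTimeout(conf, 5000);
        // Clients built from this conf now give up on connect after 5 s.
        Client client = new Client(LongWritable.class, conf);
        client.stop();
      }
    }
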
d61ac9b5023..29d8af9fd7f 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java @@ -25,6 +25,7 @@ import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.NetworkInterface; import java.net.NoRouteToHostException; +import java.net.ServerSocket; import java.net.Socket; import java.net.SocketAddress; import java.net.SocketException; @@ -865,4 +866,23 @@ public class NetUtils { } return addrs; } + + /** + * Return a free port number. There is no guarantee it will remain free, so + * it should be used immediately. + * + * @returns A free port for binding a local socket + */ + public static int getFreeSocketPort() { + int port = 0; + try { + ServerSocket s = new ServerSocket(0); + port = s.getLocalPort(); + s.close(); + return port; + } catch (IOException e) { + // Could not get a free port. Return default port 0. + } + return port; + } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java index a258c7f88ca..0745bed83a5 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Credentials.java @@ -18,10 +18,13 @@ package org.apache.hadoop.security; +import java.io.BufferedInputStream; import java.io.DataInput; import java.io.DataInputStream; import java.io.DataOutput; import java.io.DataOutputStream; +import java.io.File; +import java.io.FileInputStream; import java.io.IOException; import java.util.Arrays; import java.util.Collection; @@ -148,8 +151,32 @@ public class Credentials implements Writable { in.close(); return credentials; } catch(IOException ioe) { - IOUtils.cleanup(LOG, in); throw new IOException("Exception reading " + filename, ioe); + } finally { + IOUtils.cleanup(LOG, in); + } + } + + /** + * Convenience method for reading a token storage file, and loading the Tokens + * therein in the passed UGI + * @param filename + * @param conf + * @throws IOException + */ + public static Credentials readTokenStorageFile(File filename, Configuration conf) + throws IOException { + DataInputStream in = null; + Credentials credentials = new Credentials(); + try { + in = new DataInputStream(new BufferedInputStream( + new FileInputStream(filename))); + credentials.readTokenStorageStream(in); + return credentials; + } catch(IOException ioe) { + throw new IOException("Exception reading " + filename, ioe); + } finally { + IOUtils.cleanup(LOG, in); } } diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java index b5cb5b518a4..b7f87e5ea89 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java @@ -20,6 +20,7 @@ package org.apache.hadoop.security; import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN; import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_KERBEROS_MIN_SECONDS_BEFORE_RELOGIN_DEFAULT; +import java.io.File; import java.io.IOException; import 
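
NetUtils.getFreeSocketPort, added above, probes for an unused port by briefly binding an anonymous ServerSocket. Because the port is released again before returning, it should be bound right away, as in this sketch:

    import java.net.ServerSocket;
    import org.apache.hadoop.net.NetUtils;

    public class FreePortExample {
      public static void main(String[] args) throws Exception {
        int port = NetUtils.getFreeSocketPort();  // 0 means no free port found
        // Bind immediately to narrow the window in which another process
        // could grab the same port.
        ServerSocket server = new ServerSocket(port);
        System.out.println("Listening on port " + server.getLocalPort());
        server.close();
      }
    }
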
java.lang.reflect.UndeclaredThrowableException; import java.security.AccessControlContext; @@ -656,10 +657,11 @@ public class UserGroupInformation { String fileLocation = System.getenv(HADOOP_TOKEN_FILE_LOCATION); if (fileLocation != null) { - // load the token storage file and put all of the tokens into the - // user. + // Load the token storage file and put all of the tokens into the + // user. Don't use the FileSystem API for reading since it has a lock + // cycle (HADOOP-9212). Credentials cred = Credentials.readTokenStorageFile( - new Path("file:///" + fileLocation), conf); + new File(fileLocation), conf); loginUser.addCredentials(cred); } loginUser.spawnAutoRenewalThreadForUserCreds(); diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java index 7bde3ade14c..f2415590b0d 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java @@ -20,41 +20,78 @@ package org.apache.hadoop.util; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.HadoopVersionAnnotation; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; +import java.io.IOException; +import java.io.InputStream; +import java.util.Properties; + /** - * This class finds the package info for Hadoop and the HadoopVersionAnnotation - * information. + * This class returns build information about Hadoop components. */ @InterfaceAudience.Private @InterfaceStability.Unstable public class VersionInfo { private static final Log LOG = LogFactory.getLog(VersionInfo.class); - private static Package myPackage; - private static HadoopVersionAnnotation version; - - static { - myPackage = HadoopVersionAnnotation.class.getPackage(); - version = myPackage.getAnnotation(HadoopVersionAnnotation.class); + private Properties info; + + protected VersionInfo(String component) { + info = new Properties(); + String versionInfoFile = component + "-version-info.properties"; + try { + InputStream is = Thread.currentThread().getContextClassLoader() + .getResourceAsStream(versionInfoFile); + info.load(is); + } catch (IOException ex) { + LogFactory.getLog(getClass()).warn("Could not read '" + + versionInfoFile + "', " + ex.toString(), ex); + } } - /** - * Get the meta-data for the Hadoop package. - * @return - */ - static Package getPackage() { - return myPackage; + protected String _getVersion() { + return info.getProperty("version", "Unknown"); } - + + protected String _getRevision() { + return info.getProperty("revision", "Unknown"); + } + + protected String _getBranch() { + return info.getProperty("branch", "Unknown"); + } + + protected String _getDate() { + return info.getProperty("date", "Unknown"); + } + + protected String _getUser() { + return info.getProperty("user", "Unknown"); + } + + protected String _getUrl() { + return info.getProperty("url", "Unknown"); + } + + protected String _getSrcChecksum() { + return info.getProperty("srcChecksum", "Unknown"); + } + + protected String _getBuildVersion(){ + return getVersion() + + " from " + _getRevision() + + " by " + _getUser() + + " source checksum " + _getSrcChecksum(); + } + + private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common"); /** * Get the Hadoop version. 
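
The UserGroupInformation change above switches token loading to the new Credentials.readTokenStorageFile(File, Configuration) overload so that reading the token file no longer goes through the FileSystem cache (HADOOP-9212). A minimal sketch of the same call, with a hypothetical token file path standing in for HADOOP_TOKEN_FILE_LOCATION:

    import java.io.File;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.Credentials;
    import org.apache.hadoop.security.UserGroupInformation;

    public class TokenFileExample {
      public static void main(String[] args) throws Exception {
        // Hypothetical path; in the login path this value comes from the
        // HADOOP_TOKEN_FILE_LOCATION environment variable.
        File tokenFile = new File("/tmp/tokens.bin");
        Configuration conf = new Configuration();
        Credentials creds = Credentials.readTokenStorageFile(tokenFile, conf);
        UserGroupInformation.getCurrentUser().addCredentials(creds);
      }
    }
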
* @return the Hadoop version string, eg. "0.6.3-dev" */ public static String getVersion() { - return version != null ? version.version() : "Unknown"; + return COMMON_VERSION_INFO._getVersion(); } /** @@ -62,7 +99,7 @@ public class VersionInfo { * @return the revision number, eg. "451451" */ public static String getRevision() { - return version != null ? version.revision() : "Unknown"; + return COMMON_VERSION_INFO._getRevision(); } /** @@ -70,7 +107,7 @@ public class VersionInfo { * @return The branch name, e.g. "trunk" or "branches/branch-0.20" */ public static String getBranch() { - return version != null ? version.branch() : "Unknown"; + return COMMON_VERSION_INFO._getBranch(); } /** @@ -78,7 +115,7 @@ public class VersionInfo { * @return the compilation date in unix date format */ public static String getDate() { - return version != null ? version.date() : "Unknown"; + return COMMON_VERSION_INFO._getDate(); } /** @@ -86,14 +123,14 @@ public class VersionInfo { * @return the username of the user */ public static String getUser() { - return version != null ? version.user() : "Unknown"; + return COMMON_VERSION_INFO._getUser(); } /** * Get the subversion URL for the root Hadoop directory. */ public static String getUrl() { - return version != null ? version.url() : "Unknown"; + return COMMON_VERSION_INFO._getUrl(); } /** @@ -101,7 +138,7 @@ public class VersionInfo { * built. **/ public static String getSrcChecksum() { - return version != null ? version.srcChecksum() : "Unknown"; + return COMMON_VERSION_INFO._getSrcChecksum(); } /** @@ -109,14 +146,11 @@ public class VersionInfo { * revision, user and date. */ public static String getBuildVersion(){ - return VersionInfo.getVersion() + - " from " + VersionInfo.getRevision() + - " by " + VersionInfo.getUser() + - " source checksum " + VersionInfo.getSrcChecksum(); + return COMMON_VERSION_INFO._getBuildVersion(); } public static void main(String[] args) { - LOG.debug("version: "+ version); + LOG.debug("version: "+ getVersion()); System.out.println("Hadoop " + getVersion()); System.out.println("Subversion " + getUrl() + " -r " + getRevision()); System.out.println("Compiled by " + getUser() + " on " + getDate()); diff --git a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 5295f3be2fe..f7f3ec255df 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-common-project/hadoop-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
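
With the rewrite above, VersionInfo keeps its public static accessors but backs them with a common-version-info.properties file generated at build time by the hadoop-maven-plugins version-info goal, instead of the deleted HadoopVersionAnnotation. A short sketch of the unchanged call sites:

    import org.apache.hadoop.util.VersionInfo;

    public class VersionExample {
      public static void main(String[] args) {
        // Values come from common-version-info.properties on the classpath;
        // any field missing from that file reports as "Unknown".
        System.out.println("Version:  " + VersionInfo.getVersion());
        System.out.println("Revision: " + VersionInfo.getRevision());
        System.out.println("Branch:   " + VersionInfo.getBranch());
        System.out.println("Built by " + VersionInfo.getUser()
            + " on " + VersionInfo.getDate());
        System.out.println("Checksum: " + VersionInfo.getSrcChecksum());
      }
    }
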
+# org.apache.hadoop.security.AnnotatedSecurityInfo diff --git a/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties new file mode 100644 index 00000000000..9a8575c6dea --- /dev/null +++ b/hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +version=${pom.version} +revision=${version-info.scm.commit} +branch=${version-info.scm.branch} +user=${user.name} +date=${version-info.build.time} +url=${version-info.scm.uri} +srcChecksum=${version-info.source.md5} diff --git a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml index b020610ba79..fe2902bc597 100644 --- a/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml +++ b/hadoop-common-project/hadoop-common/src/main/resources/core-default.xml @@ -80,9 +80,17 @@ hadoop.security.group.mapping - org.apache.hadoop.security.ShellBasedUnixGroupsMapping + org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback - Class for user to group mapping (get groups for a given user) for ACL + Class for user to group mapping (get groups for a given user) for ACL. + The default implementation, + org.apache.hadoop.security.JniBasedUnixGroupsMappingWithFallback, + will determine if the Java Native Interface (JNI) is available. If JNI is + available the implementation will use the API within hadoop to resolve a + list of groups for a user. If JNI is not available then the shell + implementation, ShellBasedUnixGroupsMapping, is used. This implementation + shells out to the Linux/Unix environment with the + bash -c groups command to resolve a list of groups for a user. @@ -565,6 +573,14 @@ + + ipc.client.connect.timeout + 20000 + Indicates the number of milliseconds a client will wait for the + socket to establish a server connection. + + + ipc.client.connect.max.retries.on.timeouts 45 diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java index 3f1d34e99b7..5bd94c3ef43 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestDelegationTokenRenewer.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. 
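
The core-default.xml change above makes JniBasedUnixGroupsMappingWithFallback the default group mapping, falling back to the shell-based implementation when JNI is unavailable. A small sketch showing how a site could inspect or override the setting, assuming nothing beyond the stock Configuration API:

    import org.apache.hadoop.conf.Configuration;

    public class GroupMappingExample {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        System.out.println("group mapping = "
            + conf.get("hadoop.security.group.mapping"));
        // Pin the pre-patch behaviour explicitly if desired.
        conf.set("hadoop.security.group.mapping",
            "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
      }
    }
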
The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.fs; import java.io.FileNotFoundException; diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java index 90db2d0526b..a64b45d80ff 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestFileUtil.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.fs; +import org.junit.Before; import java.io.BufferedReader; import java.io.File; import java.io.FileReader; @@ -173,12 +174,26 @@ public class TestFileUtil { //Expected an IOException } } + + @Before + public void before() throws IOException { + cleanupImpl(); + } @After public void tearDown() throws IOException { - FileUtil.fullyDelete(del); - FileUtil.fullyDelete(tmp); - FileUtil.fullyDelete(partitioned); + cleanupImpl(); + } + + private void cleanupImpl() throws IOException { + FileUtil.fullyDelete(del, true); + Assert.assertTrue(!del.exists()); + + FileUtil.fullyDelete(tmp, true); + Assert.assertTrue(!tmp.exists()); + + FileUtil.fullyDelete(partitioned, true); + Assert.assertTrue(!partitioned.exists()); } @Test @@ -269,12 +284,14 @@ public class TestFileUtil { Assert.assertTrue(new File(tmp, FILE).exists()); } - private File xSubDir = new File(del, "xsubdir"); - private File ySubDir = new File(del, "ysubdir"); - static String file1Name = "file1"; - private File file2 = new File(xSubDir, "file2"); - private File file3 = new File(ySubDir, "file3"); - private File zlink = new File(del, "zlink"); + private final File xSubDir = new File(del, "xSubDir"); + private final File xSubSubDir = new File(xSubDir, "xSubSubDir"); + private final File ySubDir = new File(del, "ySubDir"); + private static final String file1Name = "file1"; + private final File file2 = new File(xSubDir, "file2"); + private final File file22 = new File(xSubSubDir, "file22"); + private final File file3 = new File(ySubDir, "file3"); + private final File zlink = new File(del, "zlink"); /** * Creates a directory which can not be deleted completely. 
@@ -286,10 +303,14 @@ public class TestFileUtil { * | * .---------------------------------------, * | | | | - * file1(!w) xsubdir(-w) ysubdir(+w) zlink - * | | - * file2 file3 - * + * file1(!w) xSubDir(-rwx) ySubDir(+w) zlink + * | | | + * | file2(-rwx) file3 + * | + * xSubSubDir(-rwx) + * | + * file22(-rwx) + * * @throws IOException */ private void setupDirsAndNonWritablePermissions() throws IOException { @@ -302,7 +323,16 @@ public class TestFileUtil { xSubDir.mkdirs(); file2.createNewFile(); - xSubDir.setWritable(false); + + xSubSubDir.mkdirs(); + file22.createNewFile(); + + revokePermissions(file22); + revokePermissions(xSubSubDir); + + revokePermissions(file2); + revokePermissions(xSubDir); + ySubDir.mkdirs(); file3.createNewFile(); @@ -314,23 +344,43 @@ public class TestFileUtil { FileUtil.symLink(tmpFile.toString(), zlink.toString()); } + private static void grantPermissions(final File f) { + f.setReadable(true); + f.setWritable(true); + f.setExecutable(true); + } + + private static void revokePermissions(final File f) { + f.setWritable(false); + f.setExecutable(false); + f.setReadable(false); + } + // Validates the return value. - // Validates the existence of directory "xsubdir" and the file "file1" - // Sets writable permissions for the non-deleted dir "xsubdir" so that it can - // be deleted in tearDown(). - private void validateAndSetWritablePermissions(boolean ret) { - xSubDir.setWritable(true); - Assert.assertFalse("The return value should have been false!", ret); - Assert.assertTrue("The file file1 should not have been deleted!", + // Validates the existence of the file "file1" + private void validateAndSetWritablePermissions( + final boolean expectedRevokedPermissionDirsExist, final boolean ret) { + grantPermissions(xSubDir); + grantPermissions(xSubSubDir); + + Assert.assertFalse("The return value should have been false.", ret); + Assert.assertTrue("The file file1 should not have been deleted.", new File(del, file1Name).exists()); - Assert.assertTrue( - "The directory xsubdir should not have been deleted!", - xSubDir.exists()); - Assert.assertTrue("The file file2 should not have been deleted!", - file2.exists()); - Assert.assertFalse("The directory ysubdir should have been deleted!", + + Assert.assertEquals( + "The directory xSubDir *should* not have been deleted.", + expectedRevokedPermissionDirsExist, xSubDir.exists()); + Assert.assertEquals("The file file2 *should* not have been deleted.", + expectedRevokedPermissionDirsExist, file2.exists()); + Assert.assertEquals( + "The directory xSubSubDir *should* not have been deleted.", + expectedRevokedPermissionDirsExist, xSubSubDir.exists()); + Assert.assertEquals("The file file22 *should* not have been deleted.", + expectedRevokedPermissionDirsExist, file22.exists()); + + Assert.assertFalse("The directory ySubDir should have been deleted.", ySubDir.exists()); - Assert.assertFalse("The link zlink should have been deleted!", + Assert.assertFalse("The link zlink should have been deleted.", zlink.exists()); } @@ -339,7 +389,15 @@ public class TestFileUtil { LOG.info("Running test to verify failure of fullyDelete()"); setupDirsAndNonWritablePermissions(); boolean ret = FileUtil.fullyDelete(new MyFile(del)); - validateAndSetWritablePermissions(ret); + validateAndSetWritablePermissions(true, ret); + } + + @Test + public void testFailFullyDeleteGrantPermissions() throws IOException { + setupDirsAndNonWritablePermissions(); + boolean ret = FileUtil.fullyDelete(new MyFile(del), true); + // this time the directories with revoked 
permissions *should* be deleted: + validateAndSetWritablePermissions(false, ret); } /** @@ -388,7 +446,10 @@ public class TestFileUtil { */ @Override public File[] listFiles() { - File[] files = super.listFiles(); + final File[] files = super.listFiles(); + if (files == null) { + return null; + } List filesList = Arrays.asList(files); Collections.sort(filesList); File[] myFiles = new MyFile[files.length]; @@ -405,9 +466,17 @@ public class TestFileUtil { LOG.info("Running test to verify failure of fullyDeleteContents()"); setupDirsAndNonWritablePermissions(); boolean ret = FileUtil.fullyDeleteContents(new MyFile(del)); - validateAndSetWritablePermissions(ret); + validateAndSetWritablePermissions(true, ret); } + @Test + public void testFailFullyDeleteContentsGrantPermissions() throws IOException { + setupDirsAndNonWritablePermissions(); + boolean ret = FileUtil.fullyDeleteContents(new MyFile(del), true); + // this time the directories with revoked permissions *should* be deleted: + validateAndSetWritablePermissions(false, ret); + } + @Test public void testCopyMergeSingleDirectory() throws IOException { setupDirs(); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index acbf8918bfc..079bc370209 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -119,6 +119,18 @@ public class TestHttpServer extends HttpServerFunctionalTest { } } + @SuppressWarnings("serial") + public static class LongHeaderServlet extends HttpServlet { + @SuppressWarnings("unchecked") + @Override + public void doGet(HttpServletRequest request, + HttpServletResponse response + ) throws ServletException, IOException { + Assert.assertEquals(63 * 1024, request.getHeader("longheader").length()); + response.setStatus(HttpServletResponse.SC_OK); + } + } + @SuppressWarnings("serial") public static class HtmlContentServlet extends HttpServlet { @Override @@ -139,6 +151,7 @@ public class TestHttpServer extends HttpServerFunctionalTest { server.addServlet("echo", "/echo", EchoServlet.class); server.addServlet("echomap", "/echomap", EchoMapServlet.class); server.addServlet("htmlcontent", "/htmlcontent", HtmlContentServlet.class); + server.addServlet("longheader", "/longheader", LongHeaderServlet.class); server.addJerseyResourcePackage( JerseyResource.class.getPackage().getName(), "/jersey/*"); server.start(); @@ -197,6 +210,22 @@ public class TestHttpServer extends HttpServerFunctionalTest { readOutput(new URL(baseUrl, "/echomap?a=b&c<=d&a=>"))); } + /** + * Test that verifies headers can be up to 64K long. + * The test adds a 63K header leaving 1K for other headers. + * This is because the header buffer setting is for ALL headers, + * names and values included. 
*/ + @Test public void testLongHeader() throws Exception { + URL url = new URL(baseUrl, "/longheader"); + HttpURLConnection conn = (HttpURLConnection) url.openConnection(); + StringBuilder sb = new StringBuilder(); + for (int i = 0 ; i < 63 * 1024; i++) { + sb.append("a"); + } + conn.setRequestProperty("longheader", sb.toString()); + assertEquals(HttpURLConnection.HTTP_OK, conn.getResponseCode()); + } + @Test public void testContentTypes() throws Exception { // Static CSS files should have text/css URL cssUrl = new URL(baseUrl, "/static/test.css"); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java index 280f1a8785c..7601211a745 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/compress/TestCodecFactory.java @@ -256,5 +256,17 @@ public class TestCodecFactory extends TestCase { checkCodec("overridden factory for .gz", NewGzipCodec.class, codec); codec = factory.getCodecByClassName(NewGzipCodec.class.getCanonicalName()); checkCodec("overridden factory for gzip codec", NewGzipCodec.class, codec); + + Configuration conf = new Configuration(); + conf.set("io.compression.codecs", + " org.apache.hadoop.io.compress.GzipCodec , " + + " org.apache.hadoop.io.compress.DefaultCodec , " + + " org.apache.hadoop.io.compress.BZip2Codec "); + try { + CompressionCodecFactory.getCodecClasses(conf); + } catch (IllegalArgumentException e) { + fail("IllegalArgumentException is unexpected"); + } + } } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java index 21aa44a7c99..d358913ef05 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java @@ -67,7 +67,7 @@ public class RPCCallBenchmark implements Tool, Configurable { private int serverReaderThreads = 1; private int clientThreads = 0; private String host = "0.0.0.0"; - private int port = 12345; + private int port = 0; public int secondsToRun = 15; private int msgSize = 1024; public Class rpcEngine = @@ -201,11 +201,21 @@ public class RPCCallBenchmark implements Tool, Configurable { } } + public int getPort() { + if (port == 0) { + port = NetUtils.getFreeSocketPort(); + if (port == 0) { + throw new RuntimeException("Could not find a free port"); + } + } + return port; + } + @Override public String toString() { return "rpcEngine=" + rpcEngine + "\nserverThreads=" + serverThreads + "\nserverReaderThreads=" + serverReaderThreads + "\nclientThreads=" - + clientThreads + "\nhost=" + host + "\nport=" + port + + clientThreads + "\nhost=" + host + "\nport=" + getPort() + "\nsecondsToRun=" + secondsToRun + "\nmsgSize=" + msgSize; } } @@ -228,12 +238,12 @@ public class RPCCallBenchmark implements Tool, Configurable { .newReflectiveBlockingService(serverImpl); server = new RPC.Builder(conf).setProtocol(TestRpcService.class) - .setInstance(service).setBindAddress(opts.host).setPort(opts.port) + .setInstance(service).setBindAddress(opts.host).setPort(opts.getPort()) .setNumHandlers(opts.serverThreads).setVerbose(false).build(); } else if (opts.rpcEngine == 
WritableRpcEngine.class) { server = new RPC.Builder(conf).setProtocol(TestProtocol.class) .setInstance(new TestRPC.TestImpl()).setBindAddress(opts.host) - .setPort(opts.port).setNumHandlers(opts.serverThreads) + .setPort(opts.getPort()).setNumHandlers(opts.serverThreads) .setVerbose(false).build(); } else { throw new RuntimeException("Bad engine: " + opts.rpcEngine); @@ -378,7 +388,7 @@ public class RPCCallBenchmark implements Tool, Configurable { * Create a client proxy for the specified engine. */ private RpcServiceWrapper createRpcClient(MyOptions opts) throws IOException { - InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.port); + InetSocketAddress addr = NetUtils.createSocketAddr(opts.host, opts.getPort()); if (opts.rpcEngine == ProtobufRpcEngine.class) { final TestRpcService proxy = RPC.getProxy(TestRpcService.class, 0, addr, conf); diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java index acbf32b021a..5762b56b9a0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/TestIPC.java @@ -62,7 +62,6 @@ public class TestIPC { final private static Configuration conf = new Configuration(); final static private int PING_INTERVAL = 1000; final static private int MIN_SLEEP_TIME = 1000; - /** * Flag used to turn off the fault injection behavior * of the various writables. @@ -499,6 +498,26 @@ public class TestIPC { client.call(new LongWritable(RANDOM.nextLong()), addr, null, null, 3*PING_INTERVAL+MIN_SLEEP_TIME, conf); } + + @Test + public void testIpcConnectTimeout() throws Exception { + // start server + Server server = new TestServer(1, true); + InetSocketAddress addr = NetUtils.getConnectAddress(server); + //Intentionally do not start server to get a connection timeout + + // start client + Client.setConnectTimeout(conf, 100); + Client client = new Client(LongWritable.class, conf); + // set the rpc timeout to twice the MIN_SLEEP_TIME + try { + client.call(new LongWritable(RANDOM.nextLong()), + addr, null, null, MIN_SLEEP_TIME*2, conf); + fail("Expected an exception to have been thrown"); + } catch (SocketTimeoutException e) { + LOG.info("Get a SocketTimeoutException ", e); + } + } /** * Check that file descriptors aren't leaked by starting diff --git a/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 891a67b61f4..56eab0553d2 100644 --- a/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-common-project/hadoop-common/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
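Stepping back to the RPCCallBenchmark hunks above: the benchmark now asks NetUtils.getFreeSocketPort() for an ephemeral port instead of hard-coding 12345, and treats a returned 0 as failure. For readers unfamiliar with the idiom, a minimal standalone sketch of how a free port is commonly discovered follows; it is an illustration only, not the NetUtils implementation, and the class name is hypothetical.

import java.io.IOException;
import java.net.ServerSocket;

public class FreePortSketch {
  // Bind to port 0 so the OS assigns an unused ephemeral port, read it back,
  // then close the probe socket. Returns 0 on failure, mirroring the check
  // that getPort() performs on the value from NetUtils.getFreeSocketPort().
  static int findFreePort() {
    try (ServerSocket probe = new ServerSocket(0)) {
      return probe.getLocalPort();
    } catch (IOException e) {
      return 0;
    }
  }

  public static void main(String[] args) {
    System.out.println("free port: " + findFreePort());
  }
}

Note that there is an inherent race between closing the probe socket and the benchmark server later binding the same port; that window is acceptable for a test utility.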
+# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.ipc.TestSaslRPC$TestTokenIdentifier org.apache.hadoop.security.token.delegation.TestDelegationToken$TestDelegationTokenIdentifier diff --git a/hadoop-common-project/pom.xml b/hadoop-common-project/pom.xml index 2ee82dd7500..a09c29b67a2 100644 --- a/hadoop-common-project/pom.xml +++ b/hadoop-common-project/pom.xml @@ -49,9 +49,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml index db0412fe46a..2e03c0ebab0 100644 --- a/hadoop-dist/pom.xml +++ b/hadoop-dist/pom.xml @@ -66,9 +66,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml index ec02b61d1be..fb5febbe18f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml @@ -359,6 +359,8 @@ apache-rat-plugin + src/test/resources/classutils.txt + src/main/conf/httpfs-signature.secret diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java index 39b7e4fb61a..dd395f67495 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/main/java/org/apache/hadoop/lib/servlet/HostnameFilter.java @@ -29,6 +29,9 @@ import javax.servlet.ServletRequest; import javax.servlet.ServletResponse; import java.io.IOException; import java.net.InetAddress; +import java.net.UnknownHostException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * Filter that resolves the requester hostname. @@ -36,6 +39,7 @@ import java.net.InetAddress; @InterfaceAudience.Private public class HostnameFilter implements Filter { static final ThreadLocal HOSTNAME_TL = new ThreadLocal(); + private static final Logger log = LoggerFactory.getLogger(HostnameFilter.class); /** * Initializes the filter. 
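One note on the HostnameFilter doFilter() hunk that follows: SLF4J substitutes only the "{}" anchor, so the "{0}" in the new warn call is written out literally and the ex.toString() argument is never used (the trailing exception argument is still logged with its stack trace). A minimal sketch of the placeholder form, reusing the log field added above, would be:

// Hypothetical variant of the new warn call; "{}" is the SLF4J anchor and the
// final Throwable argument is still treated as the exception to log.
log.warn("Request remote address could not be resolved: {}", ex.toString(), ex);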
@@ -66,7 +70,19 @@ public class HostnameFilter implements Filter { public void doFilter(ServletRequest request, ServletResponse response, FilterChain chain) throws IOException, ServletException { try { - String hostname = InetAddress.getByName(request.getRemoteAddr()).getCanonicalHostName(); + String hostname; + try { + String address = request.getRemoteAddr(); + if (address != null) { + hostname = InetAddress.getByName(address).getCanonicalHostName(); + } else { + log.warn("Request remote address is NULL"); + hostname = "???"; + } + } catch (UnknownHostException ex) { + log.warn("Request remote address could not be resolved, {0}", ex.toString(), ex); + hostname = "???"; + } HOSTNAME_TL.set(hostname); chain.doFilter(request, response); } finally { diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java index 44da0afd705..3148d3a6820 100644 --- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java +++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/src/test/java/org/apache/hadoop/lib/servlet/TestHostnameFilter.java @@ -64,4 +64,30 @@ public class TestHostnameFilter extends HTestCase { filter.destroy(); } + @Test + public void testMissingHostname() throws Exception { + ServletRequest request = Mockito.mock(ServletRequest.class); + Mockito.when(request.getRemoteAddr()).thenReturn(null); + + ServletResponse response = Mockito.mock(ServletResponse.class); + + final AtomicBoolean invoked = new AtomicBoolean(); + + FilterChain chain = new FilterChain() { + @Override + public void doFilter(ServletRequest servletRequest, ServletResponse servletResponse) + throws IOException, ServletException { + assertTrue(HostnameFilter.get().contains("???")); + invoked.set(true); + } + }; + + Filter filter = new HostnameFilter(); + filter.init(null); + assertNull(HostnameFilter.get()); + filter.doFilter(request, response, chain); + assertTrue(invoked.get()); + assertNull(HostnameFilter.get()); + filter.destroy(); + } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt index 4a3228fe421..956d41f007a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt @@ -309,6 +309,11 @@ Release 2.0.3-alpha - Unreleased HDFS-4367. GetDataEncryptionKeyResponseProto does not handle null response. (suresh) + HDFS-4364. GetLinkTargetResponseProto does not handle null path. (suresh) + + HDFS-4369. GetBlockKeysResponseProto does not handle null response. + (suresh) + NEW FEATURES HDFS-2656. Add libwebhdfs, a pure C client based on WebHDFS. @@ -480,8 +485,22 @@ Release 2.0.3-alpha - Unreleased HDFS-4381. Document fsimage format details in FSImageFormat class javadoc. (Jing Zhao via suresh) + HDFS-4375. Use token request messages defined in hadoop common. + (suresh) + + HDFS-4392. Use NetUtils#getFreeSocketPort in MiniDFSCluster. + (Andrew Purtell via suresh) + + HDFS-4393. Make empty request and responses in protocol translators can be + static final members. (Brandon Li via suresh) + + HDFS-4403. DFSClient can infer checksum type when not provided by reading + first byte (todd) + OPTIMIZATIONS + HDFS-3429. DataNode reads checksums even if client does not need them (todd) + BUG FIXES HDFS-3919. MiniDFSCluster:waitClusterUp can hang forever. @@ -703,6 +722,12 @@ Release 2.0.3-alpha - Unreleased HDFS-1245. 
Pluggable block id generation. (shv) + HDFS-4415. HostnameFilter should handle hostname resolution failures and + continue processing. (Robert Kanter via atm) + + HDFS-4359. Slow RPC responses from NN can prevent metrics collection on + DNs. (liang xie via atm) + BREAKDOWN OF HDFS-3077 SUBTASKS HDFS-3077. Quorum-based protocol for reading and writing edit logs. @@ -805,9 +830,12 @@ Release 2.0.3-alpha - Unreleased HDFS-4017. Unclosed FileInputStream in GetJournalEditServlet (Chao Shi via todd) - HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas + HDFS-4351. In BlockPlacementPolicyDefault.chooseTarget(..), numOfReplicas needs to be updated when avoiding stale nodes. (Andrew Wang via szetszwo) + HDFS-4399. Fix RAT warnings by excluding images sub-dir in docs. (Thomas + Graves via acmurthy) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES @@ -2185,6 +2213,18 @@ Release 2.0.0-alpha - 05-23-2012 HDFS-3039. Address findbugs and javadoc warnings on branch. (todd via atm) +Release 0.23.7 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.6 - UNRELEASED INCOMPATIBLE CHANGES @@ -2202,7 +2242,12 @@ Release 0.23.6 - UNRELEASED HDFS-4248. Renaming directories may incorrectly remove the paths in leases under the tree. (daryn via szetszwo) -Release 0.23.5 - UNRELEASED + HDFS-4385. Maven RAT plugin is not checking all source files (tgraves) + + HDFS-4426. Secondary namenode shuts down immediately after startup. + (Arpit Agarwal via suresh) + +Release 0.23.5 - 2012-11-28 INCOMPATIBLE CHANGES diff --git a/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt b/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt index 59bcdbc9783..966012349ba 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt +++ b/hadoop-hdfs-project/hadoop-hdfs/LICENSE.txt @@ -242,3 +242,30 @@ For the org.apache.hadoop.util.bloom.* classes: * ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. */ + +For src/main/native/util/tree.h: + +/*- + * Copyright 2002 Niels Provos + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/pom.xml index 67799c85700..535b24198af 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/pom.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/pom.xml @@ -516,9 +516,12 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> src/test/resources/data* src/test/resources/editsStored* src/test/resources/empty-file + src/main/native/util/tree.h + src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj src/main/webapps/datanode/robots.txt src/main/docs/releasenotes.html src/contrib/** + src/site/resources/images/* @@ -563,6 +566,9 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> + + diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake index 912a4ba8546..ac0b5308cc3 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake +++ b/hadoop-hdfs-project/hadoop-hdfs/src/config.h.cmake @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ #ifndef CONFIG_H #define CONFIG_H diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml index cfb46edfc08..af260fa93c9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/docs/src/documentation/content/xdocs/hdfs_permissions_guide.xml @@ -92,10 +92,11 @@ There is no provision within HDFS for creating user identities, establishing gro
Group Mapping
-Once a username has been determined as described above, the list of groups is determined by a group mapping -service, configured by the hadoop.security.group.mapping property. -The default implementation, org.apache.hadoop.security.ShellBasedUnixGroupsMapping, will shell out -to the Unix bash -c groups command to resolve a list of groups for a user. +Once a username has been determined as described above, the list of groups is +determined by a group mapping service, configured by the +hadoop.security.group.mapping property. Refer to the +core-default.xml for details of the hadoop.security.group.mapping +implementation.
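A minimal sketch of exercising this property programmatically may be useful alongside the guide text, assuming the org.apache.hadoop.security.Groups helper and explicitly selecting the ShellBasedUnixGroupsMapping implementation named in the removed lines above; setting the same property in core-site.xml has the equivalent effect for daemons and clients.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.security.Groups;

public class GroupMappingSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Pin the group mapping service; any implementation of the group mapping
    // interface can be named here, this one shells out to the local OS.
    conf.set("hadoop.security.group.mapping",
        "org.apache.hadoop.security.ShellBasedUnixGroupsMapping");
    // Resolve the group list for the user named on the command line.
    Groups groups = Groups.getUserToGroupsMappingService(conf);
    System.out.println(groups.getGroups(args[0]));
  }
}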
An alternate implementation, which connects directly to an LDAP server to resolve the list of groups, is available diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java index 00dcabeaa9e..55a38a740cf 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java @@ -152,6 +152,7 @@ import org.apache.hadoop.security.token.SecretManager.InvalidToken; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenRenewer; import org.apache.hadoop.util.DataChecksum; +import org.apache.hadoop.util.DataChecksum.Type; import org.apache.hadoop.util.Progressable; import org.apache.hadoop.util.Time; @@ -1571,7 +1572,7 @@ public class DFSClient implements java.io.Closeable { */ public MD5MD5CRC32FileChecksum getFileChecksum(String src) throws IOException { checkOpen(); - return getFileChecksum(src, namenode, socketFactory, + return getFileChecksum(src, clientName, namenode, socketFactory, dfsClientConf.socketTimeout, getDataEncryptionKey(), dfsClientConf.connectToDnViaHostname); } @@ -1614,9 +1615,16 @@ public class DFSClient implements java.io.Closeable { /** * Get the checksum of a file. * @param src The file path + * @param clientName the name of the client requesting the checksum. + * @param namenode the RPC proxy for the namenode + * @param socketFactory to create sockets to connect to DNs + * @param socketTimeout timeout to use when connecting and waiting for a response + * @param encryptionKey the key needed to communicate with DNs in this cluster + * @param connectToDnViaHostname {@see #connectToDnViaHostname()} * @return The checksum */ - public static MD5MD5CRC32FileChecksum getFileChecksum(String src, + static MD5MD5CRC32FileChecksum getFileChecksum(String src, + String clientName, ClientProtocol namenode, SocketFactory socketFactory, int socketTimeout, DataEncryptionKey encryptionKey, boolean connectToDnViaHostname) throws IOException { @@ -1651,32 +1659,16 @@ public class DFSClient implements java.io.Closeable { final int timeout = 3000 * datanodes.length + socketTimeout; boolean done = false; for(int j = 0; !done && j < datanodes.length; j++) { - Socket sock = null; DataOutputStream out = null; DataInputStream in = null; try { //connect to a datanode - sock = socketFactory.createSocket(); - String dnAddr = datanodes[j].getXferAddr(connectToDnViaHostname); - if (LOG.isDebugEnabled()) { - LOG.debug("Connecting to datanode " + dnAddr); - } - NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout); - sock.setSoTimeout(timeout); - - OutputStream unbufOut = NetUtils.getOutputStream(sock); - InputStream unbufIn = NetUtils.getInputStream(sock); - if (encryptionKey != null) { - IOStreamPair encryptedStreams = - DataTransferEncryptor.getEncryptedStreams( - unbufOut, unbufIn, encryptionKey); - unbufOut = encryptedStreams.out; - unbufIn = encryptedStreams.in; - } - out = new DataOutputStream(new BufferedOutputStream(unbufOut, + IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname, + encryptionKey, datanodes[j], timeout); + out = new DataOutputStream(new BufferedOutputStream(pair.out, HdfsConstants.SMALL_BUFFER_SIZE)); - in = new DataInputStream(unbufIn); + in = new DataInputStream(pair.in); if (LOG.isDebugEnabled()) { LOG.debug("write to " + datanodes[j] + ": " @@ -1689,19 +1681,8 @@ public 
class DFSClient implements java.io.Closeable { BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)); if (reply.getStatus() != Status.SUCCESS) { - if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN - && i > lastRetriedIndex) { - if (LOG.isDebugEnabled()) { - LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " - + "for file " + src + " for block " + block - + " from datanode " + datanodes[j] - + ". Will retry the block once."); - } - lastRetriedIndex = i; - done = true; // actually it's not done; but we'll retry - i--; // repeat at i-th block - refetchBlocks = true; - break; + if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) { + throw new InvalidBlockTokenException(); } else { throw new IOException("Bad response " + reply + " for block " + block + " from datanode " + datanodes[j]); @@ -1733,8 +1714,18 @@ public class DFSClient implements java.io.Closeable { md5.write(md5out); // read crc-type - final DataChecksum.Type ct = PBHelper.convert(checksumData - .getCrcType()); + final DataChecksum.Type ct; + if (checksumData.hasCrcType()) { + ct = PBHelper.convert(checksumData + .getCrcType()); + } else { + LOG.debug("Retrieving checksum from an earlier-version DataNode: " + + "inferring checksum by reading first byte"); + ct = inferChecksumTypeByReading( + clientName, socketFactory, socketTimeout, lb, datanodes[j], + encryptionKey, connectToDnViaHostname); + } + if (i == 0) { // first block crcType = ct; } else if (crcType != DataChecksum.Type.MIXED @@ -1752,12 +1743,25 @@ public class DFSClient implements java.io.Closeable { } LOG.debug("got reply from " + datanodes[j] + ": md5=" + md5); } + } catch (InvalidBlockTokenException ibte) { + if (i > lastRetriedIndex) { + if (LOG.isDebugEnabled()) { + LOG.debug("Got access token error in response to OP_BLOCK_CHECKSUM " + + "for file " + src + " for block " + block + + " from datanode " + datanodes[j] + + ". Will retry the block once."); + } + lastRetriedIndex = i; + done = true; // actually it's not done; but we'll retry + i--; // repeat at i-th block + refetchBlocks = true; + break; + } } catch (IOException ie) { LOG.warn("src=" + src + ", datanodes["+j+"]=" + datanodes[j], ie); } finally { IOUtils.closeStream(in); IOUtils.closeStream(out); - IOUtils.closeSocket(sock); } } @@ -1789,6 +1793,90 @@ public class DFSClient implements java.io.Closeable { } } + /** + * Connect to the given datanode's datantrasfer port, and return + * the resulting IOStreamPair. This includes encryption wrapping, etc. 
+ */ + private static IOStreamPair connectToDN( + SocketFactory socketFactory, boolean connectToDnViaHostname, + DataEncryptionKey encryptionKey, DatanodeInfo dn, int timeout) + throws IOException + { + boolean success = false; + Socket sock = null; + try { + sock = socketFactory.createSocket(); + String dnAddr = dn.getXferAddr(connectToDnViaHostname); + if (LOG.isDebugEnabled()) { + LOG.debug("Connecting to datanode " + dnAddr); + } + NetUtils.connect(sock, NetUtils.createSocketAddr(dnAddr), timeout); + sock.setSoTimeout(timeout); + + OutputStream unbufOut = NetUtils.getOutputStream(sock); + InputStream unbufIn = NetUtils.getInputStream(sock); + IOStreamPair ret; + if (encryptionKey != null) { + ret = DataTransferEncryptor.getEncryptedStreams( + unbufOut, unbufIn, encryptionKey); + } else { + ret = new IOStreamPair(unbufIn, unbufOut); + } + success = true; + return ret; + } finally { + if (!success) { + IOUtils.closeSocket(sock); + } + } + } + + /** + * Infer the checksum type for a replica by sending an OP_READ_BLOCK + * for the first byte of that replica. This is used for compatibility + * with older HDFS versions which did not include the checksum type in + * OpBlockChecksumResponseProto. + * + * @param in input stream from datanode + * @param out output stream to datanode + * @param lb the located block + * @param clientName the name of the DFSClient requesting the checksum + * @param dn the connected datanode + * @return the inferred checksum type + * @throws IOException if an error occurs + */ + private static Type inferChecksumTypeByReading( + String clientName, SocketFactory socketFactory, int socketTimeout, + LocatedBlock lb, DatanodeInfo dn, + DataEncryptionKey encryptionKey, boolean connectToDnViaHostname) + throws IOException { + IOStreamPair pair = connectToDN(socketFactory, connectToDnViaHostname, + encryptionKey, dn, socketTimeout); + + try { + DataOutputStream out = new DataOutputStream(new BufferedOutputStream(pair.out, + HdfsConstants.SMALL_BUFFER_SIZE)); + DataInputStream in = new DataInputStream(pair.in); + + new Sender(out).readBlock(lb.getBlock(), lb.getBlockToken(), clientName, 0, 1, true); + final BlockOpResponseProto reply = + BlockOpResponseProto.parseFrom(PBHelper.vintPrefixed(in)); + + if (reply.getStatus() != Status.SUCCESS) { + if (reply.getStatus() == Status.ERROR_ACCESS_TOKEN) { + throw new InvalidBlockTokenException(); + } else { + throw new IOException("Bad response " + reply + " trying to read " + + lb.getBlock() + " from datanode " + dn); + } + } + + return PBHelper.convert(reply.getReadOpChecksumInfo().getChecksum().getType()); + } finally { + IOUtils.cleanup(null, pair.in, pair.out); + } + } + /** * Set permissions to a file or directory. * @param src path name. 
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java index 7268eddb97f..7571128bd00 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java @@ -40,14 +40,18 @@ import org.apache.hadoop.tools.GetUserMappingsProtocol; public class HDFSPolicyProvider extends PolicyProvider { private static final Service[] hdfsServices = new Service[] { - new Service("security.client.protocol.acl", ClientProtocol.class), - new Service("security.client.datanode.protocol.acl", - ClientDatanodeProtocol.class), - new Service("security.datanode.protocol.acl", DatanodeProtocol.class), - new Service("security.inter.datanode.protocol.acl", - InterDatanodeProtocol.class), - new Service("security.namenode.protocol.acl", NamenodeProtocol.class), - new Service("security.qjournal.service.protocol.acl", QJournalProtocol.class), + new Service(CommonConfigurationKeys.SECURITY_CLIENT_PROTOCOL_ACL, + ClientProtocol.class), + new Service(CommonConfigurationKeys.SECURITY_CLIENT_DATANODE_PROTOCOL_ACL, + ClientDatanodeProtocol.class), + new Service(CommonConfigurationKeys.SECURITY_DATANODE_PROTOCOL_ACL, + DatanodeProtocol.class), + new Service(CommonConfigurationKeys.SECURITY_INTER_DATANODE_PROTOCOL_ACL, + InterDatanodeProtocol.class), + new Service(CommonConfigurationKeys.SECURITY_NAMENODE_PROTOCOL_ACL, + NamenodeProtocol.class), + new Service(CommonConfigurationKeys.SECURITY_QJOURNAL_SERVICE_PROTOCOL_ACL, + QJournalProtocol.class), new Service(CommonConfigurationKeys.SECURITY_HA_SERVICE_PROTOCOL_ACL, HAServiceProtocol.class), new Service(CommonConfigurationKeys.SECURITY_ZKFC_PROTOCOL_ACL, diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java index dc449ee2f24..f7ac589921e 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader.java @@ -380,7 +380,8 @@ public class RemoteBlockReader extends FSInputChecker implements BlockReader { // in and out will be closed when sock is closed (by the caller) final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( NetUtils.getOutputStream(sock, HdfsServerConstants.WRITE_TIMEOUT))); - new Sender(out).readBlock(block, blockToken, clientName, startOffset, len); + new Sender(out).readBlock(block, blockToken, clientName, startOffset, len, + verifyChecksum); // // Get bytes in block, set streams diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java index 3450cd1524d..58bb37a724a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/RemoteBlockReader2.java @@ -392,7 +392,8 @@ public class RemoteBlockReader2 implements BlockReader { // in and out will be closed when sock is closed (by the caller) final DataOutputStream out = new DataOutputStream(new BufferedOutputStream( ioStreams.out)); - new Sender(out).readBlock(block, blockToken, 
clientName, startOffset, len); + new Sender(out).readBlock(block, blockToken, clientName, startOffset, len, + verifyChecksum); // // Get bytes in block diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java index 98094472a73..7f4463789b4 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/DataTransferProtocol.java @@ -55,12 +55,15 @@ public interface DataTransferProtocol { * @param clientName client's name. * @param blockOffset offset of the block. * @param length maximum number of bytes for this read. + * @param sendChecksum if false, the DN should skip reading and sending + * checksums */ public void readBlock(final ExtendedBlock blk, final Token blockToken, final String clientName, final long blockOffset, - final long length) throws IOException; + final long length, + final boolean sendChecksum) throws IOException; /** * Write a block to a datanode pipeline. diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java index b1edc20e3a9..a156dfa538a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Receiver.java @@ -88,7 +88,8 @@ public abstract class Receiver implements DataTransferProtocol { PBHelper.convert(proto.getHeader().getBaseHeader().getToken()), proto.getHeader().getClientName(), proto.getOffset(), - proto.getLen()); + proto.getLen(), + proto.getSendChecksums()); } /** Receive OP_WRITE_BLOCK */ diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java index 8184c500f8b..fb8bee5388b 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/Sender.java @@ -62,6 +62,10 @@ public class Sender implements DataTransferProtocol { private static void send(final DataOutputStream out, final Op opcode, final Message proto) throws IOException { + if (LOG.isTraceEnabled()) { + LOG.trace("Sending DataTransferOp " + proto.getClass().getSimpleName() + + ": " + proto); + } op(out, opcode); proto.writeDelimitedTo(out); out.flush(); @@ -72,12 +76,14 @@ public class Sender implements DataTransferProtocol { final Token blockToken, final String clientName, final long blockOffset, - final long length) throws IOException { + final long length, + final boolean sendChecksum) throws IOException { OpReadBlockProto proto = OpReadBlockProto.newBuilder() .setHeader(DataTransferProtoUtil.buildClientHeader(blk, clientName, blockToken)) .setOffset(blockOffset) .setLen(length) + .setSendChecksums(sendChecksum) .build(); send(out, Op.READ_BLOCK, proto); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java 
b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java index f38d0145c84..dedba5af276 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientDatanodeProtocolTranslatorPB.java @@ -77,7 +77,7 @@ public class ClientDatanodeProtocolTranslatorPB implements /** RpcController is not used and hence is set to null */ private final static RpcController NULL_CONTROLLER = null; private final ClientDatanodeProtocolPB rpcProxy; - private final static RefreshNamenodesRequestProto REFRESH_NAMENODES = + private final static RefreshNamenodesRequestProto VOID_REFRESH_NAMENODES = RefreshNamenodesRequestProto.newBuilder().build(); public ClientDatanodeProtocolTranslatorPB(DatanodeID datanodeid, @@ -170,7 +170,7 @@ public class ClientDatanodeProtocolTranslatorPB implements @Override public void refreshNamenodes() throws IOException { try { - rpcProxy.refreshNamenodes(NULL_CONTROLLER, REFRESH_NAMENODES); + rpcProxy.refreshNamenodes(NULL_CONTROLLER, VOID_REFRESH_NAMENODES); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java index 023156edd17..4b788913607 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java @@ -40,8 +40,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowS import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto; @@ -73,8 +71,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCon import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetContentSummaryResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; @@ -107,8 +103,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto; @@ -143,6 +137,12 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.LocatedBlockProto; import org.apache.hadoop.hdfs.security.token.block.DataEncryptionKey; import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier; import org.apache.hadoop.io.Text; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.security.token.Token; import com.google.protobuf.RpcController; @@ -171,6 +171,78 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements static final DisallowSnapshotResponseProto VOID_DISALLOW_SNAPSHOT_RESPONSE = DisallowSnapshotResponseProto.newBuilder().build(); + private static final CreateResponseProto VOID_CREATE_RESPONSE = + CreateResponseProto.newBuilder().build(); + + private static final AppendResponseProto VOID_APPEND_RESPONSE = + AppendResponseProto.newBuilder().build(); + + private static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = + SetPermissionResponseProto.newBuilder().build(); + + private static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = + SetOwnerResponseProto.newBuilder().build(); + + private static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = + AbandonBlockResponseProto.newBuilder().build(); + + private static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = + ReportBadBlocksResponseProto.newBuilder().build(); + + private static final ConcatResponseProto VOID_CONCAT_RESPONSE = + ConcatResponseProto.newBuilder().build(); + + private static final Rename2ResponseProto VOID_RENAME2_RESPONSE = + Rename2ResponseProto.newBuilder().build(); + + private static final GetListingResponseProto VOID_GETLISTING_RESPONSE = + GetListingResponseProto.newBuilder().build(); + + private static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = + RenewLeaseResponseProto.newBuilder().build(); + + private static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE = + 
SaveNamespaceResponseProto.newBuilder().build(); + + private static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = + RefreshNodesResponseProto.newBuilder().build(); + + private static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = + FinalizeUpgradeResponseProto.newBuilder().build(); + + private static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = + MetaSaveResponseProto.newBuilder().build(); + + private static final GetFileInfoResponseProto VOID_GETFILEINFO_RESPONSE = + GetFileInfoResponseProto.newBuilder().build(); + + private static final GetFileLinkInfoResponseProto VOID_GETFILELINKINFO_RESPONSE = + GetFileLinkInfoResponseProto.newBuilder().build(); + + private static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = + SetQuotaResponseProto.newBuilder().build(); + + private static final FsyncResponseProto VOID_FSYNC_RESPONSE = + FsyncResponseProto.newBuilder().build(); + + private static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = + SetTimesResponseProto.newBuilder().build(); + + private static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = + CreateSymlinkResponseProto.newBuilder().build(); + + private static final UpdatePipelineResponseProto + VOID_UPDATEPIPELINE_RESPONSE = + UpdatePipelineResponseProto.newBuilder().build(); + + private static final CancelDelegationTokenResponseProto + VOID_CANCELDELEGATIONTOKEN_RESPONSE = + CancelDelegationTokenResponseProto.newBuilder().build(); + + private static final SetBalancerBandwidthResponseProto + VOID_SETBALANCERBANDWIDTH_RESPONSE = + SetBalancerBandwidthResponseProto.newBuilder().build(); + /** * Constructor * @@ -215,9 +287,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } - static final CreateResponseProto VOID_CREATE_RESPONSE = - CreateResponseProto.newBuilder().build(); - @Override public CreateResponseProto create(RpcController controller, CreateRequestProto req) throws ServiceException { @@ -232,9 +301,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return VOID_CREATE_RESPONSE; } - static final AppendResponseProto NULL_APPEND_RESPONSE = - AppendResponseProto.newBuilder().build(); - @Override public AppendResponseProto append(RpcController controller, AppendRequestProto req) throws ServiceException { @@ -244,7 +310,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return AppendResponseProto.newBuilder() .setBlock(PBHelper.convert(result)).build(); } - return NULL_APPEND_RESPONSE; + return VOID_APPEND_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } @@ -263,9 +329,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } - static final SetPermissionResponseProto VOID_SET_PERM_RESPONSE = - SetPermissionResponseProto.newBuilder().build(); - @Override public SetPermissionResponseProto setPermission(RpcController controller, SetPermissionRequestProto req) throws ServiceException { @@ -277,9 +340,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return VOID_SET_PERM_RESPONSE; } - static final SetOwnerResponseProto VOID_SET_OWNER_RESPONSE = - SetOwnerResponseProto.newBuilder().build(); - @Override public SetOwnerResponseProto setOwner(RpcController controller, SetOwnerRequestProto req) throws ServiceException { @@ -293,9 +353,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return VOID_SET_OWNER_RESPONSE; } - static final AbandonBlockResponseProto VOID_ADD_BLOCK_RESPONSE = - 
AbandonBlockResponseProto.newBuilder().build(); - @Override public AbandonBlockResponseProto abandonBlock(RpcController controller, AbandonBlockRequestProto req) throws ServiceException { @@ -361,9 +418,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final ReportBadBlocksResponseProto VOID_REP_BAD_BLOCK_RESPONSE = - ReportBadBlocksResponseProto.newBuilder().build(); - @Override public ReportBadBlocksResponseProto reportBadBlocks(RpcController controller, ReportBadBlocksRequestProto req) throws ServiceException { @@ -377,9 +431,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return VOID_REP_BAD_BLOCK_RESPONSE; } - static final ConcatResponseProto VOID_CONCAT_RESPONSE = - ConcatResponseProto.newBuilder().build(); - @Override public ConcatResponseProto concat(RpcController controller, ConcatRequestProto req) throws ServiceException { @@ -403,9 +454,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final Rename2ResponseProto VOID_RENAME2_RESPONSE = - Rename2ResponseProto.newBuilder().build(); - @Override public Rename2ResponseProto rename2(RpcController controller, Rename2RequestProto req) throws ServiceException { @@ -442,8 +490,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final GetListingResponseProto NULL_GETLISTING_RESPONSE = - GetListingResponseProto.newBuilder().build(); @Override public GetListingResponseProto getListing(RpcController controller, GetListingRequestProto req) throws ServiceException { @@ -455,16 +501,13 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return GetListingResponseProto.newBuilder().setDirList( PBHelper.convert(result)).build(); } else { - return NULL_GETLISTING_RESPONSE; + return VOID_GETLISTING_RESPONSE; } } catch (IOException e) { throw new ServiceException(e); } } - static final RenewLeaseResponseProto VOID_RENEWLEASE_RESPONSE = - RenewLeaseResponseProto.newBuilder().build(); - @Override public RenewLeaseResponseProto renewLease(RpcController controller, RenewLeaseRequestProto req) throws ServiceException { @@ -549,9 +592,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final SaveNamespaceResponseProto VOID_SAVENAMESPACE_RESPONSE = - SaveNamespaceResponseProto.newBuilder().build(); - @Override public SaveNamespaceResponseProto saveNamespace(RpcController controller, SaveNamespaceRequestProto req) throws ServiceException { @@ -578,9 +618,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } - static final RefreshNodesResponseProto VOID_REFRESHNODES_RESPONSE = - RefreshNodesResponseProto.newBuilder().build(); - @Override public RefreshNodesResponseProto refreshNodes(RpcController controller, RefreshNodesRequestProto req) throws ServiceException { @@ -593,9 +630,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } - static final FinalizeUpgradeResponseProto VOID_FINALIZEUPGRADE_RESPONSE = - FinalizeUpgradeResponseProto.newBuilder().build(); - @Override public FinalizeUpgradeResponseProto finalizeUpgrade(RpcController controller, FinalizeUpgradeRequestProto req) throws ServiceException { @@ -622,9 +656,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final MetaSaveResponseProto VOID_METASAVE_RESPONSE = - MetaSaveResponseProto.newBuilder().build(); - @Override public MetaSaveResponseProto metaSave(RpcController controller, MetaSaveRequestProto req) throws ServiceException { @@ 
-637,8 +668,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } - static final GetFileInfoResponseProto NULL_GETFILEINFO_RESPONSE = - GetFileInfoResponseProto.newBuilder().build(); @Override public GetFileInfoResponseProto getFileInfo(RpcController controller, GetFileInfoRequestProto req) throws ServiceException { @@ -649,14 +678,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements return GetFileInfoResponseProto.newBuilder().setFs( PBHelper.convert(result)).build(); } - return NULL_GETFILEINFO_RESPONSE; + return VOID_GETFILEINFO_RESPONSE; } catch (IOException e) { throw new ServiceException(e); } } - static final GetFileLinkInfoResponseProto NULL_GETFILELINKINFO_RESPONSE = - GetFileLinkInfoResponseProto.newBuilder().build(); @Override public GetFileLinkInfoResponseProto getFileLinkInfo(RpcController controller, GetFileLinkInfoRequestProto req) throws ServiceException { @@ -668,7 +695,7 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements PBHelper.convert(result)).build(); } else { System.out.println("got null result for getFileLinkInfo for " + req.getSrc()); - return NULL_GETFILELINKINFO_RESPONSE; + return VOID_GETFILELINKINFO_RESPONSE; } } catch (IOException e) { @@ -689,9 +716,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final SetQuotaResponseProto VOID_SETQUOTA_RESPONSE = - SetQuotaResponseProto.newBuilder().build(); - @Override public SetQuotaResponseProto setQuota(RpcController controller, SetQuotaRequestProto req) throws ServiceException { @@ -704,9 +728,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final FsyncResponseProto VOID_FSYNC_RESPONSE = - FsyncResponseProto.newBuilder().build(); - @Override public FsyncResponseProto fsync(RpcController controller, FsyncRequestProto req) throws ServiceException { @@ -718,9 +739,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final SetTimesResponseProto VOID_SETTIMES_RESPONSE = - SetTimesResponseProto.newBuilder().build(); - @Override public SetTimesResponseProto setTimes(RpcController controller, SetTimesRequestProto req) throws ServiceException { @@ -732,9 +750,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final CreateSymlinkResponseProto VOID_CREATESYMLINK_RESPONSE = - CreateSymlinkResponseProto.newBuilder().build(); - @Override public CreateSymlinkResponseProto createSymlink(RpcController controller, CreateSymlinkRequestProto req) throws ServiceException { @@ -752,8 +767,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements GetLinkTargetRequestProto req) throws ServiceException { try { String result = server.getLinkTarget(req.getPath()); - return GetLinkTargetResponseProto.newBuilder().setTargetPath(result) - .build(); + GetLinkTargetResponseProto.Builder builder = GetLinkTargetResponseProto + .newBuilder(); + if (result != null) { + builder.setTargetPath(result); + } + return builder.build(); } catch (IOException e) { throw new ServiceException(e); } @@ -774,9 +793,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final UpdatePipelineResponseProto VOID_UPDATEPIPELINE_RESPONSE = - UpdatePipelineResponseProto.newBuilder().build(); - @Override public UpdatePipelineResponseProto updatePipeline(RpcController controller, UpdatePipelineRequestProto req) throws ServiceException { @@ -818,16 +834,12 @@ public class ClientNamenodeProtocolServerSideTranslatorPB 
implements long result = server.renewDelegationToken(PBHelper .convertDelegationToken(req.getToken())); return RenewDelegationTokenResponseProto.newBuilder() - .setNewExireTime(result).build(); + .setNewExpiryTime(result).build(); } catch (IOException e) { throw new ServiceException(e); } } - static final CancelDelegationTokenResponseProto - VOID_CANCELDELEGATIONTOKEN_RESPONSE = - CancelDelegationTokenResponseProto.newBuilder().build(); - @Override public CancelDelegationTokenResponseProto cancelDelegationToken( RpcController controller, CancelDelegationTokenRequestProto req) @@ -841,10 +853,6 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements } } - static final SetBalancerBandwidthResponseProto - VOID_SETBALANCERBANDWIDTH_RESPONSE = - SetBalancerBandwidthResponseProto.newBuilder().build(); - @Override public SetBalancerBandwidthResponseProto setBalancerBandwidth( RpcController controller, SetBalancerBandwidthRequestProto req) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java index e65e43a6bda..e0c55044e68 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java @@ -52,7 +52,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlo import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AllowSnapshotRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AppendResponseProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CompleteRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ConcatRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CreateRequestProto; @@ -70,14 +69,13 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetCon import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDataEncryptionKeyResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDatanodeReportRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetDelegationTokenResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFileLinkInfoResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetFsStatusRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetRequestProto; +import 
org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetLinkTargetResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetListingResponseProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.GetPreferredBlockSizeRequestProto; @@ -92,7 +90,6 @@ import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Refres import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.Rename2RequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenameSnapshotRequestProto; -import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RenewLeaseRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.ReportBadBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.RestoreFailedStorageRequestProto; @@ -120,6 +117,10 @@ import org.apache.hadoop.ipc.ProtocolTranslator; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ipc.RpcClientUtil; import org.apache.hadoop.security.AccessControlException; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.security.token.Token; import com.google.protobuf.ByteString; @@ -136,6 +137,29 @@ public class ClientNamenodeProtocolTranslatorPB implements ProtocolMetaInterface, ClientProtocol, Closeable, ProtocolTranslator { final private ClientNamenodeProtocolPB rpcProxy; + static final GetServerDefaultsRequestProto VOID_GET_SERVER_DEFAULT_REQUEST = + GetServerDefaultsRequestProto.newBuilder().build(); + + private final static GetFsStatusRequestProto VOID_GET_FSSTATUS_REQUEST = + GetFsStatusRequestProto.newBuilder().build(); + + private final static SaveNamespaceRequestProto VOID_SAVE_NAMESPACE_REQUEST = + SaveNamespaceRequestProto.newBuilder().build(); + + private final static RollEditsRequestProto VOID_ROLLEDITS_REQUEST = + RollEditsRequestProto.getDefaultInstance(); + + private final static RefreshNodesRequestProto VOID_REFRESH_NODES_REQUEST = + RefreshNodesRequestProto.newBuilder().build(); + + private final static FinalizeUpgradeRequestProto + VOID_FINALIZE_UPGRADE_REQUEST = + FinalizeUpgradeRequestProto.newBuilder().build(); + + private final static GetDataEncryptionKeyRequestProto + VOID_GET_DATA_ENCRYPTIONKEY_REQUEST = + GetDataEncryptionKeyRequestProto.newBuilder().build(); + public ClientNamenodeProtocolTranslatorPB(ClientNamenodeProtocolPB proxy) { rpcProxy = proxy; } @@ -167,7 +191,7 @@ public class ClientNamenodeProtocolTranslatorPB implements @Override public FsServerDefaults getServerDefaults() throws IOException { - GetServerDefaultsRequestProto req = GetServerDefaultsRequestProto.newBuilder().build(); + GetServerDefaultsRequestProto req = VOID_GET_SERVER_DEFAULT_REQUEST; try { return PBHelper .convert(rpcProxy.getServerDefaults(null, req).getServerDefaults()); @@ -480,9 +504,9 @@ public class 
ClientNamenodeProtocolTranslatorPB implements @Override public long[] getStats() throws IOException { - GetFsStatusRequestProto req = GetFsStatusRequestProto.newBuilder().build(); try { - return PBHelper.convert(rpcProxy.getFsStats(null, req)); + return PBHelper.convert(rpcProxy.getFsStats(null, + VOID_GET_FSSTATUS_REQUEST)); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -529,10 +553,8 @@ public class ClientNamenodeProtocolTranslatorPB implements @Override public void saveNamespace() throws AccessControlException, IOException { - SaveNamespaceRequestProto req = SaveNamespaceRequestProto.newBuilder() - .build(); try { - rpcProxy.saveNamespace(null, req); + rpcProxy.saveNamespace(null, VOID_SAVE_NAMESPACE_REQUEST); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -540,9 +562,9 @@ public class ClientNamenodeProtocolTranslatorPB implements @Override public long rollEdits() throws AccessControlException, IOException { - RollEditsRequestProto req = RollEditsRequestProto.getDefaultInstance(); try { - RollEditsResponseProto resp = rpcProxy.rollEdits(null, req); + RollEditsResponseProto resp = rpcProxy.rollEdits(null, + VOID_ROLLEDITS_REQUEST); return resp.getNewSegmentTxId(); } catch (ServiceException se) { throw ProtobufHelper.getRemoteException(se); @@ -564,9 +586,8 @@ public class ClientNamenodeProtocolTranslatorPB implements @Override public void refreshNodes() throws IOException { - RefreshNodesRequestProto req = RefreshNodesRequestProto.newBuilder().build(); try { - rpcProxy.refreshNodes(null, req); + rpcProxy.refreshNodes(null, VOID_REFRESH_NODES_REQUEST); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -574,9 +595,8 @@ public class ClientNamenodeProtocolTranslatorPB implements @Override public void finalizeUpgrade() throws IOException { - FinalizeUpgradeRequestProto req = FinalizeUpgradeRequestProto.newBuilder().build(); try { - rpcProxy.finalizeUpgrade(null, req); + rpcProxy.finalizeUpgrade(null, VOID_FINALIZE_UPGRADE_REQUEST); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -722,7 +742,8 @@ public class ClientNamenodeProtocolTranslatorPB implements GetLinkTargetRequestProto req = GetLinkTargetRequestProto.newBuilder() .setPath(path).build(); try { - return rpcProxy.getLinkTarget(null, req).getTargetPath(); + GetLinkTargetResponseProto rsp = rpcProxy.getLinkTarget(null, req); + return rsp.hasTargetPath() ? rsp.getTargetPath() : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -783,7 +804,7 @@ public class ClientNamenodeProtocolTranslatorPB implements setToken(PBHelper.convert(token)). build(); try { - return rpcProxy.renewDelegationToken(null, req).getNewExireTime(); + return rpcProxy.renewDelegationToken(null, req).getNewExpiryTime(); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -824,12 +845,10 @@ public class ClientNamenodeProtocolTranslatorPB implements @Override public DataEncryptionKey getDataEncryptionKey() throws IOException { - GetDataEncryptionKeyRequestProto req = GetDataEncryptionKeyRequestProto - .newBuilder().build(); try { - GetDataEncryptionKeyResponseProto rsp = - rpcProxy.getDataEncryptionKey(null, req); - return rsp.hasDataEncryptionKey() ? + GetDataEncryptionKeyResponseProto rsp = rpcProxy.getDataEncryptionKey( + null, VOID_GET_DATA_ENCRYPTIONKEY_REQUEST); + return rsp.hasDataEncryptionKey() ? 
PBHelper.convert(rsp.getDataEncryptionKey()) : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java index 3150414d468..fd4cc4b01c5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolClientSideTranslatorPB.java @@ -84,7 +84,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements /** RpcController is not used and hence is set to null */ private final DatanodeProtocolPB rpcProxy; - private static final VersionRequestProto VERSION_REQUEST = + private static final VersionRequestProto VOID_VERSION_REQUEST = VersionRequestProto.newBuilder().build(); private final static RpcController NULL_CONTROLLER = null; @@ -243,7 +243,7 @@ public class DatanodeProtocolClientSideTranslatorPB implements public NamespaceInfo versionRequest() throws IOException { try { return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER, - VERSION_REQUEST).getInfo()); + VOID_VERSION_REQUEST).getInfo()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java index 861852f9b31..3e424602fa9 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/DatanodeProtocolServerSideTranslatorPB.java @@ -62,15 +62,17 @@ public class DatanodeProtocolServerSideTranslatorPB implements DatanodeProtocolPB { private final DatanodeProtocol impl; - private static final ErrorReportResponseProto ERROR_REPORT_RESPONSE_PROTO = - ErrorReportResponseProto.newBuilder().build(); + private static final ErrorReportResponseProto + VOID_ERROR_REPORT_RESPONSE_PROTO = + ErrorReportResponseProto.newBuilder().build(); private static final BlockReceivedAndDeletedResponseProto - BLOCK_RECEIVED_AND_DELETE_RESPONSE = + VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE = BlockReceivedAndDeletedResponseProto.newBuilder().build(); - private static final ReportBadBlocksResponseProto REPORT_BAD_BLOCK_RESPONSE = - ReportBadBlocksResponseProto.newBuilder().build(); + private static final ReportBadBlocksResponseProto + VOID_REPORT_BAD_BLOCK_RESPONSE = + ReportBadBlocksResponseProto.newBuilder().build(); private static final CommitBlockSynchronizationResponseProto - COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO = + VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO = CommitBlockSynchronizationResponseProto.newBuilder().build(); public DatanodeProtocolServerSideTranslatorPB(DatanodeProtocol impl) { @@ -180,7 +182,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return BLOCK_RECEIVED_AND_DELETE_RESPONSE; + return VOID_BLOCK_RECEIVED_AND_DELETE_RESPONSE; } @Override @@ -192,7 +194,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new 
ServiceException(e); } - return ERROR_REPORT_RESPONSE_PROTO; + return VOID_ERROR_REPORT_RESPONSE_PROTO; } @Override @@ -221,7 +223,7 @@ public class DatanodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return REPORT_BAD_BLOCK_RESPONSE; + return VOID_REPORT_BAD_BLOCK_RESPONSE; } @Override @@ -242,6 +244,6 @@ public class DatanodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO; + return VOID_COMMIT_BLOCK_SYNCHRONIZATION_RESPONSE_PROTO; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java index 1805d146640..a4259375301 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/JournalProtocolServerSideTranslatorPB.java @@ -42,6 +42,13 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB /** Server side implementation to delegate the requests to */ private final JournalProtocol impl; + private final static JournalResponseProto VOID_JOURNAL_RESPONSE = + JournalResponseProto.newBuilder().build(); + + private final static StartLogSegmentResponseProto + VOID_START_LOG_SEGMENT_RESPONSE = + StartLogSegmentResponseProto.newBuilder().build(); + public JournalProtocolServerSideTranslatorPB(JournalProtocol impl) { this.impl = impl; } @@ -56,7 +63,7 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB } catch (IOException e) { throw new ServiceException(e); } - return JournalResponseProto.newBuilder().build(); + return VOID_JOURNAL_RESPONSE; } /** @see JournalProtocol#startLogSegment */ @@ -69,7 +76,7 @@ public class JournalProtocolServerSideTranslatorPB implements JournalProtocolPB } catch (IOException e) { throw new ServiceException(e); } - return StartLogSegmentResponseProto.newBuilder().build(); + return VOID_START_LOG_SEGMENT_RESPONSE; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java index c3466e15a5e..aaf8c461299 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolServerSideTranslatorPB.java @@ -63,6 +63,12 @@ public class NamenodeProtocolServerSideTranslatorPB implements NamenodeProtocolPB { private final NamenodeProtocol impl; + private final static ErrorReportResponseProto VOID_ERROR_REPORT_RESPONSE = + ErrorReportResponseProto.newBuilder().build(); + + private final static EndCheckpointResponseProto VOID_END_CHECKPOINT_RESPONSE = + EndCheckpointResponseProto.newBuilder().build(); + public NamenodeProtocolServerSideTranslatorPB(NamenodeProtocol impl) { this.impl = impl; } @@ -91,8 +97,12 @@ public class NamenodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return GetBlockKeysResponseProto.newBuilder() - .setKeys(PBHelper.convert(keys)).build(); + 
GetBlockKeysResponseProto.Builder builder = + GetBlockKeysResponseProto.newBuilder(); + if (keys != null) { + builder.setKeys(PBHelper.convert(keys)); + } + return builder.build(); } @Override @@ -143,7 +153,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return ErrorReportResponseProto.newBuilder().build(); + return VOID_ERROR_REPORT_RESPONSE; } @Override @@ -181,7 +191,7 @@ public class NamenodeProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return EndCheckpointResponseProto.newBuilder().build(); + return VOID_END_CHECKPOINT_RESPONSE; } @Override diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java index 6c630d168ed..918f6843ac6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos.VersionRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.EndCheckpointRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.ErrorReportRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysRequestProto; +import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlockKeysResponseProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetBlocksRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetEditLogManifestRequestProto; import org.apache.hadoop.hdfs.protocol.proto.NamenodeProtocolProtos.GetMostRecentCheckpointTxIdRequestProto; @@ -67,13 +68,13 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol, /* * Protobuf requests with no parameters instantiated only once */ - private static final GetBlockKeysRequestProto GET_BLOCKKEYS = + private static final GetBlockKeysRequestProto VOID_GET_BLOCKKEYS_REQUEST = GetBlockKeysRequestProto.newBuilder().build(); - private static final GetTransactionIdRequestProto GET_TRANSACTIONID = + private static final GetTransactionIdRequestProto VOID_GET_TRANSACTIONID_REQUEST = GetTransactionIdRequestProto.newBuilder().build(); - private static final RollEditLogRequestProto ROLL_EDITLOG = + private static final RollEditLogRequestProto VOID_ROLL_EDITLOG_REQUEST = RollEditLogRequestProto.newBuilder().build(); - private static final VersionRequestProto VERSION_REQUEST = + private static final VersionRequestProto VOID_VERSION_REQUEST = VersionRequestProto.newBuilder().build(); final private NamenodeProtocolPB rpcProxy; @@ -104,8 +105,9 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol, @Override public ExportedBlockKeys getBlockKeys() throws IOException { try { - return PBHelper.convert(rpcProxy.getBlockKeys(NULL_CONTROLLER, - GET_BLOCKKEYS).getKeys()); + GetBlockKeysResponseProto rsp = rpcProxy.getBlockKeys(NULL_CONTROLLER, + VOID_GET_BLOCKKEYS_REQUEST); + return rsp.hasKeys() ? 
PBHelper.convert(rsp.getKeys()) : null; } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -114,8 +116,8 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol, @Override public long getTransactionID() throws IOException { try { - return rpcProxy.getTransactionId(NULL_CONTROLLER, GET_TRANSACTIONID) - .getTxId(); + return rpcProxy.getTransactionId(NULL_CONTROLLER, + VOID_GET_TRANSACTIONID_REQUEST).getTxId(); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -135,7 +137,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol, public CheckpointSignature rollEditLog() throws IOException { try { return PBHelper.convert(rpcProxy.rollEditLog(NULL_CONTROLLER, - ROLL_EDITLOG).getSignature()); + VOID_ROLL_EDITLOG_REQUEST).getSignature()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } @@ -145,7 +147,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol, public NamespaceInfo versionRequest() throws IOException { try { return PBHelper.convert(rpcProxy.versionRequest(NULL_CONTROLLER, - VERSION_REQUEST).getInfo()); + VOID_VERSION_REQUEST).getInfo()); } catch (ServiceException e) { throw ProtobufHelper.getRemoteException(e); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java index e87e97ff8a5..d7137158385 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolClientSideTranslatorPB.java @@ -38,6 +38,10 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements private final static RpcController NULL_CONTROLLER = null; private final RefreshAuthorizationPolicyProtocolPB rpcProxy; + private final static RefreshServiceAclRequestProto + VOID_REFRESH_SERVICE_ACL_REQUEST = + RefreshServiceAclRequestProto.newBuilder().build(); + public RefreshAuthorizationPolicyProtocolClientSideTranslatorPB( RefreshAuthorizationPolicyProtocolPB rpcProxy) { this.rpcProxy = rpcProxy; @@ -50,10 +54,9 @@ public class RefreshAuthorizationPolicyProtocolClientSideTranslatorPB implements @Override public void refreshServiceAcl() throws IOException { - RefreshServiceAclRequestProto request = RefreshServiceAclRequestProto - .newBuilder().build(); try { - rpcProxy.refreshServiceAcl(NULL_CONTROLLER, request); + rpcProxy.refreshServiceAcl(NULL_CONTROLLER, + VOID_REFRESH_SERVICE_ACL_REQUEST); } catch (ServiceException se) { throw ProtobufHelper.getRemoteException(se); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java index 360a42d4b3e..e9644a0798d 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshAuthorizationPolicyProtocolServerSideTranslatorPB.java @@ -32,6 +32,10 @@ 
public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements private final RefreshAuthorizationPolicyProtocol impl; + private final static RefreshServiceAclResponseProto + VOID_REFRESH_SERVICE_ACL_RESPONSE = RefreshServiceAclResponseProto + .newBuilder().build(); + public RefreshAuthorizationPolicyProtocolServerSideTranslatorPB( RefreshAuthorizationPolicyProtocol impl) { this.impl = impl; @@ -46,6 +50,6 @@ public class RefreshAuthorizationPolicyProtocolServerSideTranslatorPB implements } catch (IOException e) { throw new ServiceException(e); } - return RefreshServiceAclResponseProto.newBuilder().build(); + return VOID_REFRESH_SERVICE_ACL_RESPONSE; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java index bed2b996045..5313a886dcb 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolClientSideTranslatorPB.java @@ -39,6 +39,14 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements private final static RpcController NULL_CONTROLLER = null; private final RefreshUserMappingsProtocolPB rpcProxy; + private final static RefreshUserToGroupsMappingsRequestProto + VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST = + RefreshUserToGroupsMappingsRequestProto.newBuilder().build(); + + private final static RefreshSuperUserGroupsConfigurationRequestProto + VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST = + RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build(); + public RefreshUserMappingsProtocolClientSideTranslatorPB( RefreshUserMappingsProtocolPB rpcProxy) { this.rpcProxy = rpcProxy; @@ -51,10 +59,9 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements @Override public void refreshUserToGroupsMappings() throws IOException { - RefreshUserToGroupsMappingsRequestProto request = - RefreshUserToGroupsMappingsRequestProto.newBuilder().build(); try { - rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER, request); + rpcProxy.refreshUserToGroupsMappings(NULL_CONTROLLER, + VOID_REFRESH_USER_TO_GROUPS_MAPPING_REQUEST); } catch (ServiceException se) { throw ProtobufHelper.getRemoteException(se); } @@ -62,10 +69,9 @@ public class RefreshUserMappingsProtocolClientSideTranslatorPB implements @Override public void refreshSuperUserGroupsConfiguration() throws IOException { - RefreshSuperUserGroupsConfigurationRequestProto request = - RefreshSuperUserGroupsConfigurationRequestProto.newBuilder().build(); try { - rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER, request); + rpcProxy.refreshSuperUserGroupsConfiguration(NULL_CONTROLLER, + VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_REQUEST); } catch (ServiceException se) { throw ProtobufHelper.getRemoteException(se); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java index eb351896439..4d98ea20072 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/RefreshUserMappingsProtocolServerSideTranslatorPB.java @@ -33,6 +33,15 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres private final RefreshUserMappingsProtocol impl; + private final static RefreshUserToGroupsMappingsResponseProto + VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE = + RefreshUserToGroupsMappingsResponseProto.newBuilder().build(); + + private final static RefreshSuperUserGroupsConfigurationResponseProto + VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE = + RefreshSuperUserGroupsConfigurationResponseProto.newBuilder() + .build(); + public RefreshUserMappingsProtocolServerSideTranslatorPB(RefreshUserMappingsProtocol impl) { this.impl = impl; } @@ -47,7 +56,7 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres } catch (IOException e) { throw new ServiceException(e); } - return RefreshUserToGroupsMappingsResponseProto.newBuilder().build(); + return VOID_REFRESH_USER_GROUPS_MAPPING_RESPONSE; } @Override @@ -60,7 +69,6 @@ public class RefreshUserMappingsProtocolServerSideTranslatorPB implements Refres } catch (IOException e) { throw new ServiceException(e); } - return RefreshSuperUserGroupsConfigurationResponseProto.newBuilder() - .build(); + return VOID_REFRESH_SUPERUSER_GROUPS_CONFIGURATION_RESPONSE; } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolProtocolBuffers/overview.html deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java index a232331b0b3..653c0696d3f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/protocolPB/QJournalProtocolServerSideTranslatorPB.java @@ -65,6 +65,13 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP /** Server side implementation to delegate the requests to */ private final QJournalProtocol impl; + private final static JournalResponseProto VOID_JOURNAL_RESPONSE = + JournalResponseProto.newBuilder().build(); + + private final static StartLogSegmentResponseProto + VOID_START_LOG_SEGMENT_RESPONSE = + StartLogSegmentResponseProto.newBuilder().build(); + public QJournalProtocolServerSideTranslatorPB(QJournalProtocol impl) { this.impl = impl; } @@ -135,7 +142,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP } catch (IOException e) { throw new ServiceException(e); } - return JournalResponseProto.newBuilder().build(); + return VOID_JOURNAL_RESPONSE; } /** @see JournalProtocol#heartbeat */ @@ -160,7 +167,7 @@ public class QJournalProtocolServerSideTranslatorPB implements QJournalProtocolP } catch (IOException e) { throw new ServiceException(e); } - return StartLogSegmentResponseProto.newBuilder().build(); + return VOID_START_LOG_SEGMENT_RESPONSE; } @Override diff --git 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java index c170cf9edda..6738241c046 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BPOfferService.java @@ -395,7 +395,7 @@ class BPOfferService { } @VisibleForTesting - synchronized List getBPServiceActors() { + List getBPServiceActors() { return Lists.newArrayList(bpServices); } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java index d3d2f915ca4..8a117546ff5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java @@ -388,8 +388,8 @@ class BlockPoolSliceScanner { try { adjustThrottler(); - blockSender = new BlockSender(block, 0, -1, false, true, datanode, - null); + blockSender = new BlockSender(block, 0, -1, false, true, true, + datanode, null); DataOutputStream out = new DataOutputStream(new IOUtils.NullOutputStream()); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java index bbcb2dd2e1f..fdade84f0ef 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java @@ -45,6 +45,8 @@ import org.apache.hadoop.io.nativeio.NativeIO; import org.apache.hadoop.net.SocketOutputStream; import org.apache.hadoop.util.DataChecksum; +import com.google.common.base.Preconditions; + /** * Reads a block from the disk and sends it to a recipient. * @@ -158,12 +160,14 @@ class BlockSender implements java.io.Closeable { * @param length length of data to read * @param corruptChecksumOk * @param verifyChecksum verify checksum while reading the data + * @param sendChecksum send checksum to client. * @param datanode datanode from which the block is being read * @param clientTraceFmt format string used to print client trace logs * @throws IOException */ BlockSender(ExtendedBlock block, long startOffset, long length, boolean corruptChecksumOk, boolean verifyChecksum, + boolean sendChecksum, DataNode datanode, String clientTraceFmt) throws IOException { try { @@ -175,6 +179,13 @@ class BlockSender implements java.io.Closeable { this.shouldDropCacheBehindRead = datanode.getDnConf().dropCacheBehindReads; this.datanode = datanode; + if (verifyChecksum) { + // To simplify implementation, callers may not specify verification + // without sending. 
+ Preconditions.checkArgument(sendChecksum, + "If verifying checksum, currently must also send it."); + } + final Replica replica; final long replicaVisibleLength; synchronized(datanode.data) { @@ -213,29 +224,37 @@ class BlockSender implements java.io.Closeable { * False, True: will verify checksum * False, False: throws IOException file not found */ - DataChecksum csum; - final InputStream metaIn = datanode.data.getMetaDataInputStream(block); - if (!corruptChecksumOk || metaIn != null) { - if (metaIn == null) { - //need checksum but meta-data not found - throw new FileNotFoundException("Meta-data not found for " + block); - } - - checksumIn = new DataInputStream( - new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); + DataChecksum csum = null; + if (verifyChecksum || sendChecksum) { + final InputStream metaIn = datanode.data.getMetaDataInputStream(block); + if (!corruptChecksumOk || metaIn != null) { + if (metaIn == null) { + //need checksum but meta-data not found + throw new FileNotFoundException("Meta-data not found for " + block); + } - // read and handle the common header here. For now just a version - BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); - short version = header.getVersion(); - if (version != BlockMetadataHeader.VERSION) { - LOG.warn("Wrong version (" + version + ") for metadata file for " - + block + " ignoring ..."); + checksumIn = new DataInputStream( + new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE)); + + // read and handle the common header here. For now just a version + BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn); + short version = header.getVersion(); + if (version != BlockMetadataHeader.VERSION) { + LOG.warn("Wrong version (" + version + ") for metadata file for " + + block + " ignoring ..."); + } + csum = header.getChecksum(); + } else { + LOG.warn("Could not find metadata file for " + block); } - csum = header.getChecksum(); - } else { - LOG.warn("Could not find metadata file for " + block); - // This only decides the buffer size. Use BUFFER_SIZE? - csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 16 * 1024); + } + if (csum == null) { + // The number of bytes per checksum here determines the alignment + // of reads: we always start reading at a checksum chunk boundary, + // even if the checksum type is NULL. So, choosing too big of a value + // would risk sending too much unnecessary data. 512 (1 disk sector) + // is likely to result in minimal extra IO. 
+ csum = DataChecksum.newDataChecksum(DataChecksum.Type.NULL, 512); } /* diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java index c1845fd152b..375c6954a7f 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java @@ -1441,7 +1441,7 @@ public class DataNode extends Configured HdfsConstants.SMALL_BUFFER_SIZE)); in = new DataInputStream(unbufIn); blockSender = new BlockSender(b, 0, b.getNumBytes(), - false, false, DataNode.this, null); + false, false, true, DataNode.this, null); DatanodeInfo srcNode = new DatanodeInfo(bpReg); // diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java index 255fd35ff35..1d4c1c3fc70 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java @@ -241,7 +241,8 @@ class DataXceiver extends Receiver implements Runnable { final Token blockToken, final String clientName, final long blockOffset, - final long length) throws IOException { + final long length, + final boolean sendChecksum) throws IOException { previousOpClientName = clientName; OutputStream baseStream = getOutputStream(); @@ -266,7 +267,7 @@ class DataXceiver extends Receiver implements Runnable { try { try { blockSender = new BlockSender(block, blockOffset, length, - true, false, datanode, clientTraceFmt); + true, false, sendChecksum, datanode, clientTraceFmt); } catch(IOException e) { String msg = "opReadBlock " + block + " received exception " + e; LOG.info(msg); @@ -654,7 +655,7 @@ class DataXceiver extends Receiver implements Runnable { try { // check if the block exists or not - blockSender = new BlockSender(block, 0, -1, false, false, datanode, + blockSender = new BlockSender(block, 0, -1, false, false, true, datanode, null); // set up response stream diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java index ce11fc9e687..e2a5035e962 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupState.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.hdfs.server.namenode; import java.io.IOException; diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java index 40286ec8ed6..5c9d164e2ff 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java @@ -21,7 +21,6 @@ import java.io.IOException; import java.io.PrintWriter; import java.net.URL; -import javax.net.SocketFactory; import javax.servlet.ServletContext; import javax.servlet.ServletException; import javax.servlet.http.HttpServletRequest; @@ -33,14 +32,11 @@ import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum; import org.apache.hadoop.hdfs.DFSClient; import org.apache.hadoop.hdfs.DFSConfigKeys; import org.apache.hadoop.hdfs.HdfsConfiguration; -import org.apache.hadoop.hdfs.protocol.ClientProtocol; import org.apache.hadoop.hdfs.protocol.DatanodeID; import org.apache.hadoop.hdfs.protocol.DatanodeInfo; -import org.apache.hadoop.hdfs.server.common.HdfsServerConstants; import org.apache.hadoop.hdfs.server.common.JspHelper; import org.apache.hadoop.hdfs.server.datanode.DataNode; import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper; -import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.util.ServletUtil; import org.znerd.xmlenc.XMLOutputter; @@ -116,18 +112,11 @@ public class FileChecksumServlets { final DataNode datanode = (DataNode) context.getAttribute("datanode"); final Configuration conf = new HdfsConfiguration(datanode.getConf()); - final int socketTimeout = conf.getInt( - DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, - HdfsServerConstants.READ_TIMEOUT); - final SocketFactory socketFactory = NetUtils.getSocketFactory(conf, - ClientProtocol.class); try { final DFSClient dfs = DatanodeJspHelper.getDFSClient(request, datanode, conf, getUGI(request, conf)); - final ClientProtocol nnproxy = dfs.getNamenode(); - final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum( - path, nnproxy, socketFactory, socketTimeout, dfs.getDataEncryptionKey(), false); + final MD5MD5CRC32FileChecksum checksum = dfs.getFileChecksum(path); MD5MD5CRC32FileChecksum.write(xml, checksum); } catch(IOException ioe) { writeXml(ioe, path, xml); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java index d2f11115882..c22d941010a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java @@ -281,6 +281,17 @@ public class SecondaryNameNode implements Runnable { LOG.info("Log Size Trigger :" + checkpointConf.getTxnCount() + " txns"); } + /** + * Wait for the service to finish. + * (Normally, it runs forever.) + */ + private void join() { + try { + infoServer.join(); + } catch (InterruptedException ie) { + } + } + /** * Shut down this instance of the datanode. * Returns only after shutdown is complete. 
@@ -607,6 +618,7 @@ public class SecondaryNameNode implements Runnable { if (secondary != null) { secondary.startCheckpointThread(); + secondary.join(); } } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto index 9344eaf2e2f..2ca13129326 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/ClientNamenodeProtocol.proto @@ -168,7 +168,7 @@ message RenameRequestProto { required string dst = 2; } -message RenameResponseProto { // void response +message RenameResponseProto { required bool result = 1; } @@ -393,7 +393,7 @@ message GetLinkTargetRequestProto { required string path = 1; } message GetLinkTargetResponseProto { - required string targetPath = 1; + optional string targetPath = 1; } message UpdateBlockForPipelineRequestProto { @@ -415,29 +415,6 @@ message UpdatePipelineRequestProto { message UpdatePipelineResponseProto { // void response } -message GetDelegationTokenRequestProto { - required string renewer = 1; -} - -message GetDelegationTokenResponseProto { - optional hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message RenewDelegationTokenResponseProto { - required uint64 newExireTime = 1; -} - -message CancelDelegationTokenRequestProto { - required hadoop.common.TokenProto token = 1; -} - -message CancelDelegationTokenResponseProto { // void response -} - message SetBalancerBandwidthRequestProto { required int64 bandwidth = 1; } @@ -554,12 +531,12 @@ service ClientNamenodeProtocol { returns(UpdateBlockForPipelineResponseProto); rpc updatePipeline(UpdatePipelineRequestProto) returns(UpdatePipelineResponseProto); - rpc getDelegationToken(GetDelegationTokenRequestProto) - returns(GetDelegationTokenResponseProto); - rpc renewDelegationToken(RenewDelegationTokenRequestProto) - returns(RenewDelegationTokenResponseProto); - rpc cancelDelegationToken(CancelDelegationTokenRequestProto) - returns(CancelDelegationTokenResponseProto); + rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto) + returns(hadoop.common.GetDelegationTokenResponseProto); + rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto) + returns(hadoop.common.RenewDelegationTokenResponseProto); + rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto) + returns(hadoop.common.CancelDelegationTokenResponseProto); rpc setBalancerBandwidth(SetBalancerBandwidthRequestProto) returns(SetBalancerBandwidthResponseProto); rpc getDataEncryptionKey(GetDataEncryptionKeyRequestProto) diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto index 62884c66815..24e72fa2f93 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/NamenodeProtocol.proto @@ -56,7 +56,7 @@ message GetBlockKeysRequestProto { * keys - Information about block keys at the active namenode */ message GetBlockKeysResponseProto { - required ExportedBlockKeysProto keys = 1; + optional ExportedBlockKeysProto keys = 1; } /** diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto index 8ce5fd75661..0e78e7b3d58 100644 --- 
a/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/proto/datatransfer.proto @@ -52,6 +52,7 @@ message OpReadBlockProto { required ClientOperationHeaderProto header = 1; required uint64 offset = 2; required uint64 len = 3; + optional bool sendChecksums = 4 [default = true]; } @@ -182,5 +183,5 @@ message OpBlockChecksumResponseProto { required uint32 bytesPerCrc = 1; required uint64 crcPerBlock = 2; required bytes md5 = 3; - optional ChecksumTypeProto crcType = 4 [default = CHECKSUM_CRC32]; + optional ChecksumTypeProto crcType = 4; } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 10b874b6855..59603a96eb1 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 20addd74b00..5889c12d329 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.hdfs.DFSClient$Renewer org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer org.apache.hadoop.hdfs.HftpFileSystem$TokenManager diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java index 95008348bda..9d18c1d6433 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java @@ -48,7 +48,6 @@ import java.io.IOException; import java.io.PrintWriter; import java.io.RandomAccessFile; import java.net.InetSocketAddress; -import java.net.ServerSocket; import java.net.URI; import java.net.URISyntaxException; import java.nio.channels.FileChannel; @@ -2290,19 +2289,6 @@ public class MiniDFSCluster { return nameNodes[nnIndex].nameNode; } - private int getFreeSocketPort() { - int port = 0; - try { - ServerSocket s = new ServerSocket(0); - port = s.getLocalPort(); - s.close(); - return port; - } catch (IOException e) { - // Could not get a free port. Return default port 0. - } - return port; - } - protected void setupDatanodeAddress(Configuration conf, boolean setupHostsFile, boolean checkDataNodeAddrConfig) throws IOException { if (setupHostsFile) { @@ -2311,7 +2297,7 @@ public class MiniDFSCluster { throw new IOException("Parameter dfs.hosts is not setup in conf"); } // Setup datanode in the include file, if it is defined in the conf - String address = "127.0.0.1:" + getFreeSocketPort(); + String address = "127.0.0.1:" + NetUtils.getFreeSocketPort(); if (checkDataNodeAddrConfig) { conf.setIfUnset(DFS_DATANODE_ADDRESS_KEY, address); } else { diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java index 77ea9c5907e..d699f750fc5 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java @@ -444,21 +444,21 @@ public class TestDataTransferProtocol { recvBuf.reset(); blk.setBlockId(blkid-1); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, fileLen); + 0L, fileLen, true); sendRecvData("Wrong block ID " + newBlockId + " for read", false); // negative block start offset -1L sendBuf.reset(); blk.setBlockId(blkid); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - -1L, fileLen); + -1L, fileLen, true); sendRecvData("Negative start-offset for read for block " + firstBlock.getBlockId(), false); // bad block start offset sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - fileLen, fileLen); + fileLen, fileLen, true); sendRecvData("Wrong start-offset for reading block " + firstBlock.getBlockId(), false); @@ -475,7 +475,7 @@ public class TestDataTransferProtocol { sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, -1L-random.nextInt(oneMil)); + 0L, -1L-random.nextInt(oneMil), true); sendRecvData("Negative length for 
reading block " + firstBlock.getBlockId(), false); @@ -488,14 +488,14 @@ public class TestDataTransferProtocol { recvOut); sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, fileLen+1); + 0L, fileLen+1, true); sendRecvData("Wrong length for reading block " + firstBlock.getBlockId(), false); //At the end of all this, read the file to make sure that succeeds finally. sendBuf.reset(); sender.readBlock(blk, BlockTokenSecretManager.DUMMY_TOKEN, "cl", - 0L, fileLen); + 0L, fileLen, true); readFile(fileSys, file, fileLen); } finally { cluster.shutdown(); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java index b4320520354..fa384cde7a6 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelRead.java @@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs; import java.io.IOException; +import org.apache.commons.logging.impl.Log4JLogger; +import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; +import org.apache.log4j.Level; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -56,4 +59,11 @@ public class TestParallelRead extends TestParallelReadUtil { public void testParallelReadMixed() throws IOException { runTestWorkload(new MixedWorkloadHelper()); } + + @Test + public void testParallelNoChecksums() throws IOException { + verifyChecksums = false; + runTestWorkload(new MixedWorkloadHelper()); + } + } diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java index 1c59eca871d..51c3200d2ce 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelReadUtil.java @@ -46,6 +46,7 @@ public class TestParallelReadUtil { static final int FILE_SIZE_K = 256; static Random rand = null; static final int DEFAULT_REPLICATION_FACTOR = 2; + protected boolean verifyChecksums = true; static { // The client-trace log ends up causing a lot of blocking threads @@ -317,7 +318,8 @@ public class TestParallelReadUtil { testInfo.filepath = new Path("/TestParallelRead.dat." 
+ i); testInfo.authenticData = util.writeFile(testInfo.filepath, FILE_SIZE_K); - testInfo.dis = dfsClient.open(testInfo.filepath.toString()); + testInfo.dis = dfsClient.open(testInfo.filepath.toString(), + dfsClient.dfsClientConf.ioBufferSize, verifyChecksums); for (int j = 0; j < nWorkerEach; ++j) { workers[nWorkers++] = new ReadWorker(testInfo, nWorkers, helper); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java index 1e0681f4711..9afa493391a 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPread.java @@ -24,11 +24,14 @@ import java.io.DataOutputStream; import java.io.IOException; import java.util.Random; +import org.apache.commons.logging.impl.Log4JLogger; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FSDataInputStream; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol; import org.apache.hadoop.hdfs.server.datanode.SimulatedFSDataset; +import org.apache.log4j.Level; import org.junit.Test; /** @@ -194,11 +197,19 @@ public class TestPread { */ @Test public void testPreadDFS() throws IOException { - dfsPreadTest(false); //normal pread - dfsPreadTest(true); //trigger read code path without transferTo. + dfsPreadTest(false, true); //normal pread + dfsPreadTest(true, true); //trigger read code path without transferTo. } - private void dfsPreadTest(boolean disableTransferTo) throws IOException { + @Test + public void testPreadDFSNoChecksum() throws IOException { + ((Log4JLogger)DataTransferProtocol.LOG).getLogger().setLevel(Level.ALL); + dfsPreadTest(false, false); + dfsPreadTest(true, false); + } + + private void dfsPreadTest(boolean disableTransferTo, boolean verifyChecksum) + throws IOException { Configuration conf = new HdfsConfiguration(); conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096); conf.setLong(DFSConfigKeys.DFS_CLIENT_READ_PREFETCH_SIZE_KEY, 4096); @@ -210,6 +221,7 @@ public class TestPread { } MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build(); FileSystem fileSys = cluster.getFileSystem(); + fileSys.setVerifyChecksum(verifyChecksum); try { Path file1 = new Path("preadtest.dat"); writeFile(fileSys, file1); diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java index d6f9171b1d6..00f9815537c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/FakeRenewer.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.tools; import java.io.IOException; @@ -37,4 +54,4 @@ public class FakeRenewer extends TokenRenewer { lastRenewed = null; lastCanceled = null; } -} \ No newline at end of file +} diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 721b9961607..e514c9b647c 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.tools.FakeRenewer diff --git a/hadoop-hdfs-project/pom.xml b/hadoop-hdfs-project/pom.xml index 27161004a36..49596ef804e 100644 --- a/hadoop-hdfs-project/pom.xml +++ b/hadoop-hdfs-project/pom.xml @@ -48,9 +48,6 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd"> org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt index 6ec1fcfb8cb..be38a08089f 100644 --- a/hadoop-mapreduce-project/CHANGES.txt +++ b/hadoop-mapreduce-project/CHANGES.txt @@ -19,6 +19,8 @@ Trunk (Unreleased) MAPREDUCE-4887. Add RehashPartitioner, to smooth distributions with poor implementations of Object#hashCode(). (Radim Kolar via cutting) + MAPREDUCE-4808. Refactor MapOutput and MergeManager to facilitate reuse by Shuffle implementations. (masokan via tucu) + IMPROVEMENTS MAPREDUCE-3787. [Gridmix] Optimize job monitoring and STRESS mode for @@ -151,9 +153,6 @@ Trunk (Unreleased) MAPREDUCE-3223. Remove MR1 configs from mapred-default.xml (tlipcon via harsh) - MAPREDUCE-4678. Running the Pentomino example with defaults throws - java.lang.NegativeArraySizeException (Chris McConnell via harsh) - MAPREDUCE-4695. Fix LocalRunner on trunk after MAPREDUCE-3223 broke it (harsh) @@ -170,6 +169,9 @@ Release 2.0.3-alpha - Unreleased MAPREDUCE-4123. Remove the 'mapred groups' command, which is no longer supported. (Devaraj K via sseth) + MAPREDUCE-4938. Use token request messages defined in hadoop common. + (suresh) + NEW FEATURES MAPREDUCE-4520. Added support for MapReduce applications to request for @@ -207,6 +209,8 @@ Release 2.0.3-alpha - Unreleased MAPREDUCE-4907. TrackerDistributedCacheManager issues too many getFileStatus calls. (sandyr via tucu) + MAPREDUCE-4949. Enable multiple pi jobs to run in parallel. 
(sandyr via tucu) + OPTIMIZATIONS BUG FIXES @@ -253,6 +257,17 @@ Release 2.0.3-alpha - Unreleased MAPREDUCE-1700. User supplied dependencies may conflict with MapReduce system JARs. (tomwhite) + MAPREDUCE-4936. JobImpl uber checks for cpu are wrong (Arun C Murthy via + jlowe) + + MAPREDUCE-4924. flakey test: org.apache.hadoop.mapred.TestClusterMRNotification.testMR. + (rkanter via tucu) + + MAPREDUCE-4923. Add toString method to TaggedInputSplit. (sandyr via tucu) + + MAPREDUCE-4948. Fix a failing unit test TestYARNRunner.testHistoryServerToken. + (Junping Du via sseth) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES @@ -629,6 +644,24 @@ Release 2.0.0-alpha - 05-23-2012 MAPREDUCE-4444. nodemanager fails to start when one of the local-dirs is bad (Jason Lowe via bobby) +Release 0.23.7 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + MAPREDUCE-4946. Fix a performance problem for large jobs by reducing the + number of map completion event type conversions. (Jason Lowe via sseth) + + BUG FIXES + + MAPREDUCE-4458. Warn if java.library.path is used for AM or Task + (Robert Parker via jeagles) + Release 0.23.6 - UNRELEASED INCOMPATIBLE CHANGES @@ -694,7 +727,15 @@ Release 0.23.6 - UNRELEASED MAPREDUCE-4921. JobClient should acquire HS token with RM principal (daryn via bobby) -Release 0.23.5 - UNRELEASED + MAPREDUCE-4934. Maven RAT plugin is not checking all source files (tgraves) + + MAPREDUCE-4678. Running the Pentomino example with defaults throws + java.lang.NegativeArraySizeException (Chris McConnell via harsh) + + MAPREDUCE-4925. The pentomino option parser may be buggy. + (Karthik Kambatla via harsh) + +Release 0.23.5 - 2012-11-28 INCOMPATIBLE CHANGES diff --git a/hadoop-mapreduce-project/conf/mapred-site.xml.template b/hadoop-mapreduce-project/conf/mapred-site.xml.template index 970c8fe0e8d..761c352dd09 100644 --- a/hadoop-mapreduce-project/conf/mapred-site.xml.template +++ b/hadoop-mapreduce-project/conf/mapred-site.xml.template @@ -1,5 +1,18 @@ + diff --git a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml index 08d4c2e7f68..ecac4244c30 100644 --- a/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml +++ b/hadoop-mapreduce-project/dev-support/findbugs-exclude.xml @@ -268,7 +268,7 @@ This class is unlikely to get subclassed, so ignore --> - + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java index f32b5d59b7c..38a43454ee0 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapred/TaskAttemptListenerImpl.java @@ -275,14 +275,13 @@ public class TaskAttemptListenerImpl extends CompositeService boolean shouldReset = false; org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = TypeConverter.toYarn(taskAttemptID); - org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent[] events = + TaskCompletionEvent[] events = context.getJob(attemptID.getTaskId().getJobId()).getMapAttemptCompletionEvents( startIndex, maxEvents); taskHeartbeatHandler.progressing(attemptID); - return 
new MapTaskCompletionEventsUpdate( - TypeConverter.fromYarn(events), shouldReset); + return new MapTaskCompletionEventsUpdate(events, shouldReset); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java index 03811026244..2822a880246 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java @@ -125,8 +125,8 @@ public class MRClientService extends AbstractService .getenv(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME); byte[] bytes = Base64.decodeBase64(secretKeyStr); secretManager = - new ClientToAMTokenSecretManager(this.appContext.getApplicationID(), - bytes); + new ClientToAMTokenSecretManager( + this.appContext.getApplicationAttemptId(), bytes); } server = rpc.getServer(MRClientProtocol.class, protocolHandler, address, diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java index ffa245bfb40..b14abcc6d55 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java @@ -24,6 +24,7 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; @@ -88,7 +89,7 @@ public interface Job { TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents(int fromEventId, int maxEvents); - TaskAttemptCompletionEvent[] + TaskCompletionEvent[] getMapAttemptCompletionEvents(int startIndex, int maxEvents); /** diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java index 7306cda792b..fa8764a412f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java @@ -43,6 +43,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobACLsManager; import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.JobContext; @@ -130,6 +131,9 @@ public class JobImpl 
implements org.apache.hadoop.mapreduce.v2.app.job.Job, private static final TaskAttemptCompletionEvent[] EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS = new TaskAttemptCompletionEvent[0]; + private static final TaskCompletionEvent[] + EMPTY_TASK_COMPLETION_EVENTS = new TaskCompletionEvent[0]; + private static final Log LOG = LogFactory.getLog(JobImpl.class); //The maximum fraction of fetch failures allowed for a map @@ -196,7 +200,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, private int allowedMapFailuresPercent = 0; private int allowedReduceFailuresPercent = 0; private List taskAttemptCompletionEvents; - private List mapAttemptCompletionEvents; + private List mapAttemptCompletionEvents; + private List taskCompletionIdxToMapCompletionIdx; private final List diagnostics = new ArrayList(); //task/attempt related datastructures @@ -684,27 +689,31 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, @Override public TaskAttemptCompletionEvent[] getTaskAttemptCompletionEvents( int fromEventId, int maxEvents) { - return getAttemptCompletionEvents(taskAttemptCompletionEvents, - fromEventId, maxEvents); - } - - @Override - public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents( - int startIndex, int maxEvents) { - return getAttemptCompletionEvents(mapAttemptCompletionEvents, - startIndex, maxEvents); - } - - private TaskAttemptCompletionEvent[] getAttemptCompletionEvents( - List eventList, - int startIndex, int maxEvents) { TaskAttemptCompletionEvent[] events = EMPTY_TASK_ATTEMPT_COMPLETION_EVENTS; readLock.lock(); try { - if (eventList.size() > startIndex) { + if (taskAttemptCompletionEvents.size() > fromEventId) { int actualMax = Math.min(maxEvents, - (eventList.size() - startIndex)); - events = eventList.subList(startIndex, + (taskAttemptCompletionEvents.size() - fromEventId)); + events = taskAttemptCompletionEvents.subList(fromEventId, + actualMax + fromEventId).toArray(events); + } + return events; + } finally { + readLock.unlock(); + } + } + + @Override + public TaskCompletionEvent[] getMapAttemptCompletionEvents( + int startIndex, int maxEvents) { + TaskCompletionEvent[] events = EMPTY_TASK_COMPLETION_EVENTS; + readLock.lock(); + try { + if (mapAttemptCompletionEvents.size() > startIndex) { + int actualMax = Math.min(maxEvents, + (mapAttemptCompletionEvents.size() - startIndex)); + events = mapAttemptCompletionEvents.subList(startIndex, actualMax + startIndex).toArray(events); } return events; @@ -1068,9 +1077,13 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, boolean smallCpu = ( Math.max( - conf.getInt(MRJobConfig.MAP_CPU_VCORES, 1), - conf.getInt(MRJobConfig.REDUCE_CPU_VCORES, 1)) < - sysCPUSizeForUberSlot + conf.getInt( + MRJobConfig.MAP_CPU_VCORES, + MRJobConfig.DEFAULT_MAP_CPU_VCORES), + conf.getInt( + MRJobConfig.REDUCE_CPU_VCORES, + MRJobConfig.DEFAULT_REDUCE_CPU_VCORES)) + <= sysCPUSizeForUberSlot ); boolean notChainJob = !isChainJob(conf); @@ -1243,7 +1256,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job, new ArrayList( job.numMapTasks + job.numReduceTasks + 10); job.mapAttemptCompletionEvents = - new ArrayList(job.numMapTasks + 10); + new ArrayList(job.numMapTasks + 10); + job.taskCompletionIdxToMapCompletionIdx = new ArrayList( + job.numMapTasks + job.numReduceTasks + 10); job.allowedMapFailuresPercent = job.conf.getInt(MRJobConfig.MAP_FAILURES_MAX_PERCENT, 0); @@ -1558,19 +1573,37 @@ public class JobImpl implements 
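[Editor's note] The smallCpu hunk above corrects the uber-job CPU check: the per-task vcore requests are read with their configured defaults, and a request that exactly matches the slot size still qualifies (<= rather than <). A hedged, self-contained sketch of that predicate follows; the constant names and values are placeholders, not the actual MRJobConfig defaults.

/** Simplified sketch of the corrected "small CPU" uber-job predicate. */
class UberCpuCheck {
  // Placeholder defaults; the real values come from MRJobConfig.
  static final int DEFAULT_MAP_CPU_VCORES = 1;
  static final int DEFAULT_REDUCE_CPU_VCORES = 1;

  static boolean smallCpu(Integer mapVcores, Integer reduceVcores, int slotVcores) {
    int map = (mapVcores != null) ? mapVcores : DEFAULT_MAP_CPU_VCORES;
    int reduce = (reduceVcores != null) ? reduceVcores : DEFAULT_REDUCE_CPU_VCORES;
    // A job whose largest request fits the slot exactly may still be ubered,
    // hence <= rather than the old, stricter <.
    return Math.max(map, reduce) <= slotVcores;
  }

  public static void main(String[] args) {
    System.out.println(smallCpu(null, null, 1)); // true: defaults fit a 1-vcore slot
    System.out.println(smallCpu(2, 1, 1));       // false: map request exceeds the slot
  }
}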
org.apache.hadoop.mapreduce.v2.app.job.Job, //eventId is equal to index in the arraylist tce.setEventId(job.taskAttemptCompletionEvents.size()); job.taskAttemptCompletionEvents.add(tce); + int mapEventIdx = -1; if (TaskType.MAP.equals(tce.getAttemptId().getTaskId().getTaskType())) { - job.mapAttemptCompletionEvents.add(tce); + // we track map completions separately from task completions because + // - getMapAttemptCompletionEvents uses index ranges specific to maps + // - type converting the same events over and over is expensive + mapEventIdx = job.mapAttemptCompletionEvents.size(); + job.mapAttemptCompletionEvents.add(TypeConverter.fromYarn(tce)); } + job.taskCompletionIdxToMapCompletionIdx.add(mapEventIdx); TaskAttemptId attemptId = tce.getAttemptId(); TaskId taskId = attemptId.getTaskId(); //make the previous completion event as obsolete if it exists - Object successEventNo = - job.successAttemptCompletionEventNoMap.remove(taskId); + Integer successEventNo = + job.successAttemptCompletionEventNoMap.remove(taskId); if (successEventNo != null) { TaskAttemptCompletionEvent successEvent = - job.taskAttemptCompletionEvents.get((Integer) successEventNo); + job.taskAttemptCompletionEvents.get(successEventNo); successEvent.setStatus(TaskAttemptCompletionEventStatus.OBSOLETE); + int mapCompletionIdx = + job.taskCompletionIdxToMapCompletionIdx.get(successEventNo); + if (mapCompletionIdx >= 0) { + // update the corresponding TaskCompletionEvent for the map + TaskCompletionEvent mapEvent = + job.mapAttemptCompletionEvents.get(mapCompletionIdx); + job.mapAttemptCompletionEvents.set(mapCompletionIdx, + new TaskCompletionEvent(mapEvent.getEventId(), + mapEvent.getTaskAttemptId(), mapEvent.idWithinJob(), + mapEvent.isMapTask(), TaskCompletionEvent.Status.OBSOLETE, + mapEvent.getTaskTrackerHttp())); + } } // if this attempt is not successful then why is the previous successful diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 35c4af09a71..3f30deb069a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
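[Editor's note] The transition hunk above keeps a second, already-converted copy of each map completion event plus an index map so that obsoleting a previous successful attempt updates both lists. The following sketch shows only that bookkeeping idea; the types and statuses are simplified stand-ins, not the Hadoop event classes.

import java.util.ArrayList;
import java.util.List;

class CompletionEventBook {
  enum Status { SUCCEEDED, OBSOLETE }

  static class Event {
    final int attemptId;
    Status status;
    Event(int attemptId, Status status) { this.attemptId = attemptId; this.status = status; }
  }

  private final List<Event> taskEvents = new ArrayList<>();
  private final List<Event> mapEventCopies = new ArrayList<>();
  // For each task event index, the index of its converted copy, or -1 for non-map events.
  private final List<Integer> taskIdxToMapIdx = new ArrayList<>();

  void add(Event e, boolean isMap) {
    int mapIdx = -1;
    if (isMap) {
      mapIdx = mapEventCopies.size();
      mapEventCopies.add(new Event(e.attemptId, e.status)); // converted copy, made once
    }
    taskEvents.add(e);
    taskIdxToMapIdx.add(mapIdx);
  }

  /** Obsolete both the original event and, if present, its converted copy. */
  void markObsolete(int taskEventIdx) {
    taskEvents.get(taskEventIdx).status = Status.OBSOLETE;
    int mapIdx = taskIdxToMapIdx.get(taskEventIdx);
    if (mapIdx >= 0) {
      mapEventCopies.get(mapIdx).status = Status.OBSOLETE;
    }
  }

  public static void main(String[] args) {
    CompletionEventBook book = new CompletionEventBook();
    book.add(new Event(1, Status.SUCCEEDED), true);   // map attempt
    book.add(new Event(2, Status.SUCCEEDED), false);  // reduce attempt
    book.markObsolete(0);
    System.out.println(book.mapEventCopies.get(0).status); // OBSOLETE
  }
}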
+# org.apache.hadoop.mapreduce.v2.app.MRClientSecurityInfo diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java index dfeed7f3f49..b58ad347e67 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java @@ -34,6 +34,7 @@ import java.util.Arrays; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.mapreduce.TaskType; +import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager; import org.apache.hadoop.mapreduce.v2.api.records.JobId; import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptCompletionEvent; @@ -153,9 +154,12 @@ public class TestTaskAttemptListenerImpl { .thenReturn(Arrays.copyOfRange(taskEvents, 0, 2)); when(mockJob.getTaskAttemptCompletionEvents(2, 100)) .thenReturn(Arrays.copyOfRange(taskEvents, 2, 4)); - when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn(mapEvents); - when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn(mapEvents); - when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn(empty); + when(mockJob.getMapAttemptCompletionEvents(0, 100)).thenReturn( + TypeConverter.fromYarn(mapEvents)); + when(mockJob.getMapAttemptCompletionEvents(0, 2)).thenReturn( + TypeConverter.fromYarn(mapEvents)); + when(mockJob.getMapAttemptCompletionEvents(2, 100)).thenReturn( + TypeConverter.fromYarn(empty)); AppContext appCtx = mock(AppContext.class); when(appCtx.getJob(any(JobId.class))).thenReturn(mockJob); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java index 638a8da86ca..5bab5cd3518 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java @@ -33,6 +33,7 @@ import org.apache.hadoop.fs.FileContext; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.JobACLsManager; import org.apache.hadoop.mapred.ShuffleHandler; +import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.FileSystemCounter; import org.apache.hadoop.mapreduce.JobACL; @@ -556,7 +557,7 @@ public class MockJobs extends MockApps { } @Override - public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents( + public TaskCompletionEvent[] getMapAttemptCompletionEvents( int startIndex, int maxEvents) { return null; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java index 
0c9832477a8..8edd07c2774 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFetchFailure.java @@ -25,8 +25,10 @@ import java.util.Arrays; import java.util.Iterator; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.TypeConverter; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent; import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler; import org.apache.hadoop.mapreduce.v2.api.records.JobState; @@ -150,14 +152,16 @@ public class TestFetchFailure { Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus()); - TaskAttemptCompletionEvent mapEvents[] = + TaskCompletionEvent mapEvents[] = job.getMapAttemptCompletionEvents(0, 2); + TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events); Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length); Assert.assertArrayEquals("Unexpected map events", - Arrays.copyOfRange(events, 0, 2), mapEvents); + Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents); mapEvents = job.getMapAttemptCompletionEvents(2, 200); Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length); - Assert.assertEquals("Unexpected map event", events[2], mapEvents[0]); + Assert.assertEquals("Unexpected map event", convertedEvents[2], + mapEvents[0]); } /** @@ -395,14 +399,16 @@ public class TestFetchFailure { Assert.assertEquals("Event status not correct for reduce attempt1", TaskAttemptCompletionEventStatus.SUCCEEDED, events[3].getStatus()); - TaskAttemptCompletionEvent mapEvents[] = + TaskCompletionEvent mapEvents[] = job.getMapAttemptCompletionEvents(0, 2); + TaskCompletionEvent convertedEvents[] = TypeConverter.fromYarn(events); Assert.assertEquals("Incorrect number of map events", 2, mapEvents.length); Assert.assertArrayEquals("Unexpected map events", - Arrays.copyOfRange(events, 0, 2), mapEvents); + Arrays.copyOfRange(convertedEvents, 0, 2), mapEvents); mapEvents = job.getMapAttemptCompletionEvents(2, 200); Assert.assertEquals("Incorrect number of map events", 1, mapEvents.length); - Assert.assertEquals("Unexpected map event", events[2], mapEvents[0]); + Assert.assertEquals("Unexpected map event", convertedEvents[2], + mapEvents[0]); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java index be897fa37db..2ddab831275 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java @@ -32,6 +32,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import 
org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; @@ -441,7 +442,7 @@ public class TestRuntimeEstimators { } @Override - public TaskAttemptCompletionEvent[] + public TaskCompletionEvent[] getMapAttemptCompletionEvents(int startIndex, int maxEvents) { throw new UnsupportedOperationException("Not supported yet."); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java index a53bbe69072..05164173c96 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/launcher/TestContainerLauncherImpl.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.mapreduce.v2.app.launcher; import static org.mockito.Matchers.any; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java index 572e4942dba..91bbcb066fd 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/local/TestLocalContainerAllocator.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.mapreduce.v2.app.local; import static org.mockito.Matchers.isA; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java index 930163a56ec..ad2ce63144f 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/MRClientProtocolPBClientImpl.java @@ -82,10 +82,8 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskReques import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskAttemptCompletionEventsRequestProto; @@ -95,7 +93,9 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetTaskReportsReques import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillJobRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; @@ -109,8 +109,7 @@ public class MRClientProtocolPBClientImpl implements MRClientProtocol { public MRClientProtocolPBClientImpl(long clientVersion, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, MRClientProtocolPB.class, ProtobufRpcEngine.class); - proxy = (MRClientProtocolPB)RPC.getProxy( - MRClientProtocolPB.class, clientVersion, addr, conf); + proxy = RPC.getProxy(MRClientProtocolPB.class, clientVersion, addr, conf); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java index 492272ce8e6..0cfb5279e3a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/MRClientProtocolPBServiceImpl.java @@ -73,14 +73,10 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskReques import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.KillTaskResponsePBImpl; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenRequestPBImpl; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb.RenewDelegationTokenResponsePBImpl; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.FailTaskAttemptResponseProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetCountersResponseProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDiagnosticsResponseProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetJobReportRequestProto; @@ -99,8 +95,12 @@ import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptReque import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskAttemptResponseProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskRequestProto; import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.KillTaskResponseProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import com.google.protobuf.RpcController; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java index 68ffcccf28f..8a4045552e2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenRequestProtoOrBuilder; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.ProtoBase; @@ -52,10 +52,7 @@ public class CancelDelegationTokenRequestPBImpl extends if (this.token != null) { return this.token; } - if (!p.hasDelegationToken()) { - return null; - } - this.token = convertFromProtoFormat(p.getDelegationToken()); + this.token = convertFromProtoFormat(p.getToken()); return this.token; } @@ -63,7 +60,7 @@ public class CancelDelegationTokenRequestPBImpl extends public void setDelegationToken(DelegationToken token) { maybeInitBuilder(); if (token == null) - builder.clearDelegationToken(); + builder.clearToken(); this.token = token; } @@ -78,7 +75,7 @@ public class CancelDelegationTokenRequestPBImpl extends private void mergeLocalToBuilder() { if (token != null) { - builder.setDelegationToken(convertToProtoFormat(this.token)); + builder.setToken(convertToProtoFormat(this.token)); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java index 4c7989aad17..59f0ae9f9cb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java @@ -18,7 +18,7 @@ package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenResponse; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.yarn.api.records.ProtoBase; public class CancelDelegationTokenResponsePBImpl extends diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java index 9cf26a2620b..b028c53b371 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenRequestProtoOrBuilder; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.yarn.api.records.ProtoBase; @@ -50,9 +50,6 @@ public class GetDelegationTokenRequestPBImpl extends if (this.renewer != null) { return this.renewer; } - if (!p.hasRenewer()) { - return null; - } this.renewer = p.getRenewer(); return this.renewer; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java index 59675225734..69111645d99 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java @@ -18,14 +18,13 @@ package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.GetDelegationTokenResponseProtoOrBuilder; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProtoOrBuilder; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.yarn.api.records.impl.pb.DelegationTokenPBImpl; - public class GetDelegationTokenResponsePBImpl extends ProtoBase implements GetDelegationTokenResponse { @@ -53,10 +52,10 @@ public class GetDelegationTokenResponsePBImpl extends if (this.mrToken != null) { return this.mrToken; } - if (!p.hasMRDelegationToken()) { + if 
(!p.hasToken()) { return null; } - this.mrToken = convertFromProtoFormat(p.getMRDelegationToken()); + this.mrToken = convertFromProtoFormat(p.getToken()); return this.mrToken; } @@ -64,7 +63,7 @@ public class GetDelegationTokenResponsePBImpl extends public void setDelegationToken(DelegationToken mrToken) { maybeInitBuilder(); if (mrToken == null) - builder.clearMRDelegationToken(); + builder.clearToken(); this.mrToken = mrToken; } @@ -79,7 +78,7 @@ public class GetDelegationTokenResponsePBImpl extends private void mergeLocalToBuilder() { if (mrToken != null) { - builder.setMRDelegationToken(convertToProtoFormat(this.mrToken)); + builder.setToken(convertToProtoFormat(this.mrToken)); } } @@ -97,7 +96,6 @@ public class GetDelegationTokenResponsePBImpl extends } viaProto = false; } - private DelegationTokenPBImpl convertFromProtoFormat(TokenProto p) { return new DelegationTokenPBImpl(p); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java index 7d9017d9356..5b616b6b9a1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java @@ -18,8 +18,8 @@ package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenRequestProtoOrBuilder; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.ProtoBase; @@ -52,10 +52,7 @@ public class RenewDelegationTokenRequestPBImpl extends if (this.token != null) { return this.token; } - if (!p.hasDelegationToken()) { - return null; - } - this.token = convertFromProtoFormat(p.getDelegationToken()); + this.token = convertFromProtoFormat(p.getToken()); return this.token; } @@ -63,7 +60,7 @@ public class RenewDelegationTokenRequestPBImpl extends public void setDelegationToken(DelegationToken token) { maybeInitBuilder(); if (token == null) - builder.clearDelegationToken(); + builder.clearToken(); this.token = token; } @@ -77,7 +74,7 @@ public class RenewDelegationTokenRequestPBImpl extends private void mergeLocalToBuilder() { if (token != null) { - builder.setDelegationToken(convertToProtoFormat(this.token)); + builder.setToken(convertToProtoFormat(this.token)); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java index 0a9e1275e60..beb78de7c17 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java @@ -19,8 +19,8 @@ package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb; import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenResponseProto; -import org.apache.hadoop.mapreduce.v2.proto.MRServiceProtos.RenewDelegationTokenResponseProtoOrBuilder; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProtoOrBuilder; import org.apache.hadoop.yarn.api.records.ProtoBase; public class RenewDelegationTokenResponsePBImpl extends @@ -59,12 +59,12 @@ public class RenewDelegationTokenResponsePBImpl extends @Override public long getNextExpirationTime() { RenewDelegationTokenResponseProtoOrBuilder p = viaProto ? proto : builder; - return p.getNextExpiryTs(); + return p.getNewExpiryTime(); } @Override public void setNextExpirationTime(long expTime) { maybeInitBuilder(); - builder.setNextExpiryTs(expTime); + builder.setNewExpiryTime(expTime); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto index f9de094f43d..83a946f4cee 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/MRClientProtocol.proto @@ -20,6 +20,7 @@ option java_package = "org.apache.hadoop.yarn.proto"; option java_outer_classname = "MRClientProtocol"; option java_generic_services = true; +import "Security.proto"; import "mr_service_protos.proto"; /* If making changes to this, please edit HSClientProtocolService */ @@ -31,11 +32,11 @@ service MRClientProtocolService { rpc getTaskAttemptCompletionEvents (GetTaskAttemptCompletionEventsRequestProto) returns (GetTaskAttemptCompletionEventsResponseProto); rpc getTaskReports (GetTaskReportsRequestProto) returns (GetTaskReportsResponseProto); rpc getDiagnostics (GetDiagnosticsRequestProto) returns (GetDiagnosticsResponseProto); - rpc getDelegationToken (GetDelegationTokenRequestProto) returns (GetDelegationTokenResponseProto); + rpc getDelegationToken (hadoop.common.GetDelegationTokenRequestProto) returns (hadoop.common.GetDelegationTokenResponseProto); rpc killJob (KillJobRequestProto) returns (KillJobResponseProto); rpc killTask (KillTaskRequestProto) returns (KillTaskResponseProto); rpc killTaskAttempt (KillTaskAttemptRequestProto) returns (KillTaskAttemptResponseProto); rpc failTaskAttempt (FailTaskAttemptRequestProto) returns (FailTaskAttemptResponseProto); - rpc renewDelegationToken(RenewDelegationTokenRequestProto) returns 
(RenewDelegationTokenResponseProto); - rpc cancelDelegationToken(CancelDelegationTokenRequestProto) returns (CancelDelegationTokenResponseProto); + rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto) returns (hadoop.common.RenewDelegationTokenResponseProto); + rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto) returns (hadoop.common.CancelDelegationTokenResponseProto); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto index 12c05209f3d..ff965f30774 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_service_protos.proto @@ -77,14 +77,6 @@ message GetDiagnosticsResponseProto { repeated string diagnostics = 1; } -message GetDelegationTokenRequestProto { - optional string renewer = 1; -} - -message GetDelegationTokenResponseProto { - optional hadoop.common.TokenProto m_r_delegation_token = 1; -} - message KillJobRequestProto { optional JobIdProto job_id = 1; } @@ -109,17 +101,3 @@ message FailTaskAttemptRequestProto { message FailTaskAttemptResponseProto { } -message RenewDelegationTokenRequestProto { - required hadoop.common.TokenProto delegation_token = 1; -} - -message RenewDelegationTokenResponseProto { - required int64 next_expiry_ts = 1; -} - -message CancelDelegationTokenRequestProto { - required hadoop.common.TokenProto delegation_token = 1; -} - -message CancelDelegationTokenResponseProto { -} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 0975deab7e7..cc2c32d75aa 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
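[Editor's note] The proto hunks above delete the MapReduce-local delegation-token messages and reuse the ones defined in hadoop-common's Security.proto, which is why the PBImpl classes now call getToken()/setToken() and getNewExpiryTime()/setNewExpiryTime(). The snippet below is a small usage sketch against the generated common classes; it assumes hadoop-common is on the classpath and only exercises builder/getter methods that the patch itself references.

import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto;
import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto;

/** Round-trips the common protobuf messages that now back the MR token RPCs. */
class CommonTokenProtosDemo {
  public static void main(String[] args) {
    GetDelegationTokenRequestProto request =
        GetDelegationTokenRequestProto.newBuilder()
            .setRenewer("history-server")   // renewer is a plain string field
            .build();
    System.out.println("renewer = " + request.getRenewer());

    RenewDelegationTokenResponseProto response =
        RenewDelegationTokenResponseProto.newBuilder()
            .setNewExpiryTime(System.currentTimeMillis() + 24L * 3600 * 1000)
            .build();
    System.out.println("new expiry = " + response.getNewExpiryTime());
  }
}

Sharing one message definition across HDFS, YARN, and MapReduce is what makes the uniform field names (token, renewer, newExpiryTime) possible.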
+# org.apache.hadoop.mapreduce.v2.api.MRDelegationTokenIdentifier diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 76846fc1c5e..aa5b6f120d8 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.v2.security.MRDelegationTokenRenewer diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java index 9b75a13c781..494e1c10611 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/lib/TaggedInputSplit.java @@ -138,4 +138,9 @@ class TaggedInputSplit implements Configurable, InputSplit { this.conf = conf; } + @Override + public String toString() { + return inputSplit.toString(); + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java index 5b154671605..5fc7144a8cb 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java @@ -530,6 +530,9 @@ public interface MRJobConfig { public static final String MR_AM_ENV = MR_AM_PREFIX + "env"; + public static final String MR_AM_ADMIN_USER_ENV = + MR_AM_PREFIX + "admin.user.env"; + public static final String MAPRED_MAP_ADMIN_JAVA_OPTS = "mapreduce.admin.map.child.java.opts"; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java index 7762f1dd1b6..fd07d00fa7d 100644 --- 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/TaggedInputSplit.java @@ -157,4 +157,9 @@ class TaggedInputSplit extends InputSplit implements Configurable, Writable { this.conf = conf; } + @Override + public String toString() { + return inputSplit.toString(); + } + } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java index f2cbc6e4ed7..e35cb6cdcf5 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java @@ -19,8 +19,6 @@ package org.apache.hadoop.mapreduce.task.reduce; import java.io.DataInputStream; import java.io.IOException; -import java.io.InputStream; -import java.io.OutputStream; import java.net.ConnectException; import java.net.HttpURLConnection; import java.net.MalformedURLException; @@ -38,12 +36,7 @@ import javax.net.ssl.HttpsURLConnection; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.io.IOUtils; -import org.apache.hadoop.io.compress.CodecPool; -import org.apache.hadoop.io.compress.CompressionCodec; -import org.apache.hadoop.io.compress.Decompressor; -import org.apache.hadoop.io.compress.DefaultCodec; import org.apache.hadoop.mapred.Counters; -import org.apache.hadoop.mapred.IFileInputStream; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapreduce.MRConfig; @@ -51,9 +44,6 @@ import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; import org.apache.hadoop.mapreduce.security.SecureShuffleUtils; import org.apache.hadoop.security.ssl.SSLFactory; -import org.apache.hadoop.mapreduce.task.reduce.MapOutput.Type; -import org.apache.hadoop.util.Progressable; -import org.apache.hadoop.util.ReflectionUtils; import com.google.common.annotations.VisibleForTesting; @@ -70,7 +60,7 @@ class Fetcher extends Thread { /* Default read timeout (in milliseconds) */ private final static int DEFAULT_READ_TIMEOUT = 3 * 60 * 1000; - private final Progressable reporter; + private final Reporter reporter; private static enum ShuffleErrors{IO_ERROR, WRONG_LENGTH, BAD_ID, WRONG_MAP, CONNECTION, WRONG_REDUCE} @@ -92,15 +82,10 @@ class Fetcher extends Thread { private final int connectionTimeout; private final int readTimeout; - // Decompression of map-outputs - private final CompressionCodec codec; - private final Decompressor decompressor; private final SecretKey jobTokenSecret; private volatile boolean stopped = false; - private JobConf job; - private static boolean sslShuffle; private static SSLFactory sslFactory; @@ -108,7 +93,6 @@ class Fetcher extends Thread { ShuffleScheduler scheduler, MergeManager merger, Reporter reporter, ShuffleClientMetrics metrics, ExceptionReporter exceptionReporter, SecretKey jobTokenSecret) { - this.job = job; this.reporter = reporter; this.scheduler = scheduler; this.merger = 
merger; @@ -130,16 +114,6 @@ class Fetcher extends Thread { wrongReduceErrs = reporter.getCounter(SHUFFLE_ERR_GRP_NAME, ShuffleErrors.WRONG_REDUCE.toString()); - if (job.getCompressMapOutput()) { - Class codecClass = - job.getMapOutputCompressorClass(DefaultCodec.class); - codec = ReflectionUtils.newInstance(codecClass, job); - decompressor = CodecPool.getDecompressor(codec); - } else { - codec = null; - decompressor = null; - } - this.connectionTimeout = job.getInt(MRJobConfig.SHUFFLE_CONNECT_TIMEOUT, DEFAULT_STALLED_COPY_TIMEOUT); @@ -170,7 +144,7 @@ class Fetcher extends Thread { MapHost host = null; try { // If merge is on, block - merger.waitForInMemoryMerge(); + merger.waitForResource(); // Get a host to shuffle from host = scheduler.getHost(); @@ -386,8 +360,8 @@ class Fetcher extends Thread { mapOutput = merger.reserve(mapId, decompressedLength, id); // Check if we can shuffle *now* ... - if (mapOutput.getType() == Type.WAIT) { - LOG.info("fetcher#" + id + " - MergerManager returned Status.WAIT ..."); + if (mapOutput == null) { + LOG.info("fetcher#" + id + " - MergeManager returned status WAIT ..."); //Not an error but wait to process data. return EMPTY_ATTEMPT_ID_ARRAY; } @@ -396,13 +370,9 @@ class Fetcher extends Thread { LOG.info("fetcher#" + id + " about to shuffle output of map " + mapOutput.getMapId() + " decomp: " + decompressedLength + " len: " + compressedLength + " to " + - mapOutput.getType()); - if (mapOutput.getType() == Type.MEMORY) { - shuffleToMemory(host, mapOutput, input, - (int) decompressedLength, (int) compressedLength); - } else { - shuffleToDisk(host, mapOutput, input, compressedLength); - } + mapOutput.getDescription()); + mapOutput.shuffle(host, input, compressedLength, decompressedLength, + metrics, reporter); // Inform the shuffle scheduler long endTime = System.currentTimeMillis(); @@ -538,84 +508,4 @@ class Fetcher extends Thread { } } } - - private void shuffleToMemory(MapHost host, MapOutput mapOutput, - InputStream input, - int decompressedLength, - int compressedLength) throws IOException { - IFileInputStream checksumIn = - new IFileInputStream(input, compressedLength, job); - - input = checksumIn; - - // Are map-outputs compressed? 
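[Editor's note] The Fetcher hunks above remove all knowledge of memory vs. disk outputs (and of compression) from the fetcher itself: it reserves a MapOutput from the merge manager, treats a null reservation as "wait and retry", and otherwise just calls shuffle() on whatever implementation it got back. The sketch below shows that control flow with simplified stand-in types, not the Hadoop classes.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

abstract class SketchMapOutput {
  abstract void shuffle(InputStream in, long compressedLen, long decompressedLen) throws IOException;
  abstract String getDescription();
}

class SketchInMemoryOutput extends SketchMapOutput {
  private final byte[] buffer;
  SketchInMemoryOutput(int size) { buffer = new byte[size]; }
  @Override void shuffle(InputStream in, long cLen, long dLen) throws IOException {
    int off = 0, n;
    while (off < buffer.length && (n = in.read(buffer, off, buffer.length - off)) > 0) {
      off += n;                        // copy straight into the reserved buffer
    }
  }
  @Override String getDescription() { return "MEMORY"; }
}

class SketchFetcher {
  /** Returns false when the merge manager asked us to wait. */
  boolean copyMapOutput(SketchMapOutput reserved, InputStream in,
                        long compressedLen, long decompressedLen) throws IOException {
    if (reserved == null) {
      return false;                    // WAIT: not an error, just back off
    }
    reserved.shuffle(in, compressedLen, decompressedLen);
    System.out.println("shuffled to " + reserved.getDescription());
    return true;
  }

  public static void main(String[] args) throws IOException {
    byte[] data = new byte[] {1, 2, 3, 4};
    SketchFetcher f = new SketchFetcher();
    f.copyMapOutput(new SketchInMemoryOutput(data.length),
        new ByteArrayInputStream(data), data.length, data.length);
    f.copyMapOutput(null, new ByteArrayInputStream(data), data.length, data.length);
  }
}

Moving the copy logic behind an abstract shuffle() is also what lets the old Type enum (WAIT/MEMORY/DISK) disappear later in this patch.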
- if (codec != null) { - decompressor.reset(); - input = codec.createInputStream(input, decompressor); - } - - // Copy map-output into an in-memory buffer - byte[] shuffleData = mapOutput.getMemory(); - - try { - IOUtils.readFully(input, shuffleData, 0, shuffleData.length); - metrics.inputBytes(shuffleData.length); - reporter.progress(); - LOG.info("Read " + shuffleData.length + " bytes from map-output for " + - mapOutput.getMapId()); - } catch (IOException ioe) { - // Close the streams - IOUtils.cleanup(LOG, input); - - // Re-throw - throw ioe; - } - - } - - private void shuffleToDisk(MapHost host, MapOutput mapOutput, - InputStream input, - long compressedLength) - throws IOException { - // Copy data to local-disk - OutputStream output = mapOutput.getDisk(); - long bytesLeft = compressedLength; - try { - final int BYTES_TO_READ = 64 * 1024; - byte[] buf = new byte[BYTES_TO_READ]; - while (bytesLeft > 0) { - int n = input.read(buf, 0, (int) Math.min(bytesLeft, BYTES_TO_READ)); - if (n < 0) { - throw new IOException("read past end of stream reading " + - mapOutput.getMapId()); - } - output.write(buf, 0, n); - bytesLeft -= n; - metrics.inputBytes(n); - reporter.progress(); - } - - LOG.info("Read " + (compressedLength - bytesLeft) + - " bytes from map-output for " + - mapOutput.getMapId()); - - output.close(); - } catch (IOException ioe) { - // Close the streams - IOUtils.cleanup(LOG, input, output); - - // Re-throw - throw ioe; - } - - // Sanity check - if (bytesLeft != 0) { - throw new IOException("Incomplete map output received for " + - mapOutput.getMapId() + " from " + - host.getHostName() + " (" + - bytesLeft + " bytes missing of " + - compressedLength + ")" - ); - } - } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java new file mode 100644 index 00000000000..87e9268c31a --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryMapOutput.java @@ -0,0 +1,127 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.task.reduce; + +import java.io.InputStream; +import java.io.IOException; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.conf.Configuration; + +import org.apache.hadoop.io.BoundedByteArrayOutputStream; +import org.apache.hadoop.io.IOUtils; + +import org.apache.hadoop.io.compress.CodecPool; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.io.compress.Decompressor; + +import org.apache.hadoop.mapred.IFileInputStream; +import org.apache.hadoop.mapred.Reporter; + +import org.apache.hadoop.mapreduce.TaskAttemptID; + +@InterfaceAudience.Private +@InterfaceStability.Unstable +class InMemoryMapOutput extends MapOutput { + private static final Log LOG = LogFactory.getLog(InMemoryMapOutput.class); + private Configuration conf; + private final MergeManagerImpl merger; + private final byte[] memory; + private BoundedByteArrayOutputStream byteStream; + // Decompression of map-outputs + private final CompressionCodec codec; + private final Decompressor decompressor; + + public InMemoryMapOutput(Configuration conf, TaskAttemptID mapId, + MergeManagerImpl merger, + int size, CompressionCodec codec, + boolean primaryMapOutput) { + super(mapId, (long)size, primaryMapOutput); + this.conf = conf; + this.merger = merger; + this.codec = codec; + byteStream = new BoundedByteArrayOutputStream(size); + memory = byteStream.getBuffer(); + if (codec != null) { + decompressor = CodecPool.getDecompressor(codec); + } else { + decompressor = null; + } + } + + public byte[] getMemory() { + return memory; + } + + public BoundedByteArrayOutputStream getArrayStream() { + return byteStream; + } + + @Override + public void shuffle(MapHost host, InputStream input, + long compressedLength, long decompressedLength, + ShuffleClientMetrics metrics, + Reporter reporter) throws IOException { + IFileInputStream checksumIn = + new IFileInputStream(input, compressedLength, conf); + + input = checksumIn; + + // Are map-outputs compressed? 
+ if (codec != null) { + decompressor.reset(); + input = codec.createInputStream(input, decompressor); + } + + try { + IOUtils.readFully(input, memory, 0, memory.length); + metrics.inputBytes(memory.length); + reporter.progress(); + LOG.info("Read " + memory.length + " bytes from map-output for " + + getMapId()); + } catch (IOException ioe) { + // Close the streams + IOUtils.cleanup(LOG, input); + + // Re-throw + throw ioe; + } finally { + CodecPool.returnDecompressor(decompressor); + } + } + + @Override + public void commit() throws IOException { + merger.closeInMemoryFile(this); + } + + @Override + public void abort() { + merger.unreserve(memory.length); + } + + @Override + public String getDescription() { + return "MEMORY"; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java index 380856bced8..543ff3f9cc7 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/InMemoryReader.java @@ -35,12 +35,12 @@ import org.apache.hadoop.mapreduce.TaskAttemptID; @InterfaceStability.Unstable public class InMemoryReader extends Reader { private final TaskAttemptID taskAttemptId; - private final MergeManager merger; + private final MergeManagerImpl merger; DataInputBuffer memDataIn = new DataInputBuffer(); private int start; private int length; - public InMemoryReader(MergeManager merger, TaskAttemptID taskAttemptId, + public InMemoryReader(MergeManagerImpl merger, TaskAttemptID taskAttemptId, byte[] data, int start, int length) throws IOException { super(null, null, length - start, null, null); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java index fbe7096abfd..b5a8cf53999 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MapOutput.java @@ -17,119 +17,36 @@ */ package org.apache.hadoop.mapreduce.task.reduce; +import java.io.InputStream; import java.io.IOException; -import java.io.OutputStream; + import java.util.Comparator; import java.util.concurrent.atomic.AtomicInteger; -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalDirAllocator; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.BoundedByteArrayOutputStream; -import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.MapOutputFile; + +import org.apache.hadoop.mapred.Reporter; + import org.apache.hadoop.mapreduce.TaskAttemptID; 
@InterfaceAudience.LimitedPrivate({"MapReduce"}) @InterfaceStability.Unstable -public class MapOutput { - private static final Log LOG = LogFactory.getLog(MapOutput.class); +public abstract class MapOutput { private static AtomicInteger ID = new AtomicInteger(0); - public static enum Type { - WAIT, - MEMORY, - DISK - } - private final int id; - - private final MergeManager merger; private final TaskAttemptID mapId; - private final long size; - - private final byte[] memory; - private BoundedByteArrayOutputStream byteStream; - - private final FileSystem localFS; - private final Path tmpOutputPath; - private final Path outputPath; - private final OutputStream disk; - - private final Type type; - private final boolean primaryMapOutput; - public MapOutput(TaskAttemptID mapId, MergeManager merger, long size, - JobConf conf, LocalDirAllocator localDirAllocator, - int fetcher, boolean primaryMapOutput, MapOutputFile mapOutputFile) - throws IOException { + public MapOutput(TaskAttemptID mapId, long size, boolean primaryMapOutput) { this.id = ID.incrementAndGet(); this.mapId = mapId; - this.merger = merger; - - type = Type.DISK; - - memory = null; - byteStream = null; - this.size = size; - - this.localFS = FileSystem.getLocal(conf); - outputPath = - mapOutputFile.getInputFileForWrite(mapId.getTaskID(),size); - tmpOutputPath = outputPath.suffix(String.valueOf(fetcher)); - - disk = localFS.create(tmpOutputPath); - this.primaryMapOutput = primaryMapOutput; } - public MapOutput(TaskAttemptID mapId, MergeManager merger, int size, - boolean primaryMapOutput) { - this.id = ID.incrementAndGet(); - this.mapId = mapId; - this.merger = merger; - - type = Type.MEMORY; - byteStream = new BoundedByteArrayOutputStream(size); - memory = byteStream.getBuffer(); - - this.size = size; - - localFS = null; - disk = null; - outputPath = null; - tmpOutputPath = null; - - this.primaryMapOutput = primaryMapOutput; - } - - public MapOutput(TaskAttemptID mapId) { - this.id = ID.incrementAndGet(); - this.mapId = mapId; - - type = Type.WAIT; - merger = null; - memory = null; - byteStream = null; - - size = -1; - - localFS = null; - disk = null; - outputPath = null; - tmpOutputPath = null; - - this.primaryMapOutput = false; -} - public boolean isPrimaryMapOutput() { return primaryMapOutput; } @@ -147,62 +64,28 @@ public class MapOutput { return id; } - public Path getOutputPath() { - return outputPath; - } - - public byte[] getMemory() { - return memory; - } - - public BoundedByteArrayOutputStream getArrayStream() { - return byteStream; - } - - public OutputStream getDisk() { - return disk; - } - public TaskAttemptID getMapId() { return mapId; } - public Type getType() { - return type; - } - public long getSize() { return size; } - public void commit() throws IOException { - if (type == Type.MEMORY) { - merger.closeInMemoryFile(this); - } else if (type == Type.DISK) { - localFS.rename(tmpOutputPath, outputPath); - merger.closeOnDiskFile(outputPath); - } else { - throw new IOException("Cannot commit MapOutput of type WAIT!"); - } - } - - public void abort() { - if (type == Type.MEMORY) { - merger.unreserve(memory.length); - } else if (type == Type.DISK) { - try { - localFS.delete(tmpOutputPath, false); - } catch (IOException ie) { - LOG.info("failure to clean up " + tmpOutputPath, ie); - } - } else { - throw new IllegalArgumentException - ("Cannot commit MapOutput with of type WAIT!"); - } - } + public abstract void shuffle(MapHost host, InputStream input, + long compressedLength, + long decompressedLength, + 
ShuffleClientMetrics metrics, + Reporter reporter) throws IOException; + + public abstract void commit() throws IOException; + public abstract void abort(); + + public abstract String getDescription(); + public String toString() { - return "MapOutput(" + mapId + ", " + type + ")"; + return "MapOutput(" + mapId + ", " + getDescription() + ")"; } public static class MapOutputComparator diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java index c75f14274dc..2ecc55ecbb1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManager.java @@ -15,783 +15,56 @@ * See the License for the specific language governing permissions and * limitations under the License. */ + package org.apache.hadoop.mapreduce.task.reduce; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.List; -import java.util.Set; -import java.util.TreeSet; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.ChecksumFileSystem; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.LocalDirAllocator; -import org.apache.hadoop.fs.LocalFileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.io.DataInputBuffer; -import org.apache.hadoop.io.RawComparator; import org.apache.hadoop.io.compress.CompressionCodec; import org.apache.hadoop.mapred.Counters; -import org.apache.hadoop.mapred.IFile; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapOutputFile; -import org.apache.hadoop.mapred.Merger; import org.apache.hadoop.mapred.RawKeyValueIterator; import org.apache.hadoop.mapred.Reducer; import org.apache.hadoop.mapred.Reporter; -import org.apache.hadoop.mapred.Task; -import org.apache.hadoop.mapred.IFile.Reader; -import org.apache.hadoop.mapred.IFile.Writer; -import org.apache.hadoop.mapred.Merger.Segment; import org.apache.hadoop.mapred.Task.CombineOutputCollector; -import org.apache.hadoop.mapred.Task.CombineValuesIterator; -import org.apache.hadoop.mapreduce.MRJobConfig; import org.apache.hadoop.mapreduce.TaskAttemptID; -import org.apache.hadoop.mapreduce.TaskID; -import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator; import org.apache.hadoop.util.Progress; -import org.apache.hadoop.util.ReflectionUtils; -import com.google.common.annotations.VisibleForTesting; +import java.io.IOException; -@SuppressWarnings(value={"unchecked"}) -@InterfaceAudience.LimitedPrivate({"MapReduce"}) +/** + * An interface for a reduce side merge that works with the default Shuffle + * implementation. 
+ */ +@InterfaceAudience.Private @InterfaceStability.Unstable -public class MergeManager { - - private static final Log LOG = LogFactory.getLog(MergeManager.class); - - /* Maximum percentage of the in-memory limit that a single shuffle can - * consume*/ - private static final float DEFAULT_SHUFFLE_MEMORY_LIMIT_PERCENT - = 0.25f; - - private final TaskAttemptID reduceId; - - private final JobConf jobConf; - private final FileSystem localFS; - private final FileSystem rfs; - private final LocalDirAllocator localDirAllocator; - - protected MapOutputFile mapOutputFile; - - Set> inMemoryMergedMapOutputs = - new TreeSet>(new MapOutputComparator()); - private final IntermediateMemoryToMemoryMerger memToMemMerger; - - Set> inMemoryMapOutputs = - new TreeSet>(new MapOutputComparator()); - private final MergeThread, K,V> inMemoryMerger; - - Set onDiskMapOutputs = new TreeSet(); - private final OnDiskMerger onDiskMerger; - - private final long memoryLimit; - private long usedMemory; - private long commitMemory; - private final long maxSingleShuffleLimit; - - private final int memToMemMergeOutputsThreshold; - private final long mergeThreshold; - - private final int ioSortFactor; - - private final Reporter reporter; - private final ExceptionReporter exceptionReporter; - +public interface MergeManager { /** - * Combiner class to run during in-memory merge, if defined. + * To wait until merge has some freed resources available so that it can + * accept shuffled data. This will be called before a network connection is + * established to get the map output. */ - private final Class combinerClass; + public void waitForResource() throws InterruptedException; /** - * Resettable collector used for combine. + * To reserve resources for data to be shuffled. This will be called after + * a network connection is made to shuffle the data. + * @param mapId mapper from which data will be shuffled. + * @param requestedSize size in bytes of data that will be shuffled. + * @param fetcher id of the map output fetcher that will shuffle the data. + * @return a MapOutput object that can be used by shuffle to shuffle data. If + * required resources cannot be reserved immediately, a null can be returned. 
*/ - private final CombineOutputCollector combineCollector; + public MapOutput reserve(TaskAttemptID mapId, long requestedSize, + int fetcher) throws IOException; - private final Counters.Counter spilledRecordsCounter; - - private final Counters.Counter reduceCombineInputCounter; - - private final Counters.Counter mergedMapOutputsCounter; - - private final CompressionCodec codec; - - private final Progress mergePhase; - - public MergeManager(TaskAttemptID reduceId, JobConf jobConf, - FileSystem localFS, - LocalDirAllocator localDirAllocator, - Reporter reporter, - CompressionCodec codec, - Class combinerClass, - CombineOutputCollector combineCollector, - Counters.Counter spilledRecordsCounter, - Counters.Counter reduceCombineInputCounter, - Counters.Counter mergedMapOutputsCounter, - ExceptionReporter exceptionReporter, - Progress mergePhase, MapOutputFile mapOutputFile) { - this.reduceId = reduceId; - this.jobConf = jobConf; - this.localDirAllocator = localDirAllocator; - this.exceptionReporter = exceptionReporter; - - this.reporter = reporter; - this.codec = codec; - this.combinerClass = combinerClass; - this.combineCollector = combineCollector; - this.reduceCombineInputCounter = reduceCombineInputCounter; - this.spilledRecordsCounter = spilledRecordsCounter; - this.mergedMapOutputsCounter = mergedMapOutputsCounter; - this.mapOutputFile = mapOutputFile; - this.mapOutputFile.setConf(jobConf); - - this.localFS = localFS; - this.rfs = ((LocalFileSystem)localFS).getRaw(); - - final float maxInMemCopyUse = - jobConf.getFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 0.90f); - if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) { - throw new IllegalArgumentException("Invalid value for " + - MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT + ": " + - maxInMemCopyUse); - } - - // Allow unit tests to fix Runtime memory - this.memoryLimit = - (long)(jobConf.getLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES, - Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE)) - * maxInMemCopyUse); - - this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100); - - final float singleShuffleMemoryLimitPercent = - jobConf.getFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, - DEFAULT_SHUFFLE_MEMORY_LIMIT_PERCENT); - if (singleShuffleMemoryLimitPercent <= 0.0f - || singleShuffleMemoryLimitPercent > 1.0f) { - throw new IllegalArgumentException("Invalid value for " - + MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT + ": " - + singleShuffleMemoryLimitPercent); - } - - usedMemory = 0L; - commitMemory = 0L; - this.maxSingleShuffleLimit = - (long)(memoryLimit * singleShuffleMemoryLimitPercent); - this.memToMemMergeOutputsThreshold = - jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor); - this.mergeThreshold = (long)(this.memoryLimit * - jobConf.getFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, - 0.90f)); - LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " + - "maxSingleShuffleLimit=" + maxSingleShuffleLimit + ", " + - "mergeThreshold=" + mergeThreshold + ", " + - "ioSortFactor=" + ioSortFactor + ", " + - "memToMemMergeOutputsThreshold=" + memToMemMergeOutputsThreshold); - - if (this.maxSingleShuffleLimit >= this.mergeThreshold) { - throw new RuntimeException("Invlaid configuration: " - + "maxSingleShuffleLimit should be less than mergeThreshold" - + "maxSingleShuffleLimit: " + this.maxSingleShuffleLimit - + "mergeThreshold: " + this.mergeThreshold); - } - - boolean allowMemToMemMerge = - jobConf.getBoolean(MRJobConfig.REDUCE_MEMTOMEM_ENABLED, false); - if (allowMemToMemMerge) { - this.memToMemMerger = 
- new IntermediateMemoryToMemoryMerger(this, - memToMemMergeOutputsThreshold); - this.memToMemMerger.start(); - } else { - this.memToMemMerger = null; - } - - this.inMemoryMerger = createInMemoryMerger(); - this.inMemoryMerger.start(); - - this.onDiskMerger = new OnDiskMerger(this); - this.onDiskMerger.start(); - - this.mergePhase = mergePhase; - } - - protected MergeThread, K,V> createInMemoryMerger() { - return new InMemoryMerger(this); - } - - TaskAttemptID getReduceId() { - return reduceId; - } - - @VisibleForTesting - ExceptionReporter getExceptionReporter() { - return exceptionReporter; - } - - public void waitForInMemoryMerge() throws InterruptedException { - inMemoryMerger.waitForMerge(); - } - - private boolean canShuffleToMemory(long requestedSize) { - return (requestedSize < maxSingleShuffleLimit); - } - - final private MapOutput stallShuffle = new MapOutput(null); - - public synchronized MapOutput reserve(TaskAttemptID mapId, - long requestedSize, - int fetcher - ) throws IOException { - if (!canShuffleToMemory(requestedSize)) { - LOG.info(mapId + ": Shuffling to disk since " + requestedSize + - " is greater than maxSingleShuffleLimit (" + - maxSingleShuffleLimit + ")"); - return new MapOutput(mapId, this, requestedSize, jobConf, - localDirAllocator, fetcher, true, - mapOutputFile); - } - - // Stall shuffle if we are above the memory limit - - // It is possible that all threads could just be stalling and not make - // progress at all. This could happen when: - // - // requested size is causing the used memory to go above limit && - // requested size < singleShuffleLimit && - // current used size < mergeThreshold (merge will not get triggered) - // - // To avoid this from happening, we allow exactly one thread to go past - // the memory limit. We check (usedMemory > memoryLimit) and not - // (usedMemory + requestedSize > memoryLimit). When this thread is done - // fetching, this will automatically trigger a merge thereby unlocking - // all the stalled threads - - if (usedMemory > memoryLimit) { - LOG.debug(mapId + ": Stalling shuffle since usedMemory (" + usedMemory - + ") is greater than memoryLimit (" + memoryLimit + ")." + - " CommitMemory is (" + commitMemory + ")"); - return stallShuffle; - } - - // Allow the in-memory shuffle to progress - LOG.debug(mapId + ": Proceeding with shuffle since usedMemory (" - + usedMemory + ") is lesser than memoryLimit (" + memoryLimit + ")." - + "CommitMemory is (" + commitMemory + ")"); - return unconditionalReserve(mapId, requestedSize, true); - } - /** - * Unconditional Reserve is used by the Memory-to-Memory thread - * @return + * Called at the end of shuffle. + * @return a key value iterator object. */ - private synchronized MapOutput unconditionalReserve( - TaskAttemptID mapId, long requestedSize, boolean primaryMapOutput) { - usedMemory += requestedSize; - return new MapOutput(mapId, this, (int)requestedSize, - primaryMapOutput); - } - - synchronized void unreserve(long size) { - usedMemory -= size; - } - - public synchronized void closeInMemoryFile(MapOutput mapOutput) { - inMemoryMapOutputs.add(mapOutput); - LOG.info("closeInMemoryFile -> map-output of size: " + mapOutput.getSize() - + ", inMemoryMapOutputs.size() -> " + inMemoryMapOutputs.size() - + ", commitMemory -> " + commitMemory + ", usedMemory ->" + usedMemory); - - commitMemory+= mapOutput.getSize(); - - // Can hang if mergeThreshold is really low. 
- if (commitMemory >= mergeThreshold) { - LOG.info("Starting inMemoryMerger's merge since commitMemory=" + - commitMemory + " > mergeThreshold=" + mergeThreshold + - ". Current usedMemory=" + usedMemory); - inMemoryMapOutputs.addAll(inMemoryMergedMapOutputs); - inMemoryMergedMapOutputs.clear(); - inMemoryMerger.startMerge(inMemoryMapOutputs); - commitMemory = 0L; // Reset commitMemory. - } - - if (memToMemMerger != null) { - if (inMemoryMapOutputs.size() >= memToMemMergeOutputsThreshold) { - memToMemMerger.startMerge(inMemoryMapOutputs); - } - } - } - - - public synchronized void closeInMemoryMergedFile(MapOutput mapOutput) { - inMemoryMergedMapOutputs.add(mapOutput); - LOG.info("closeInMemoryMergedFile -> size: " + mapOutput.getSize() + - ", inMemoryMergedMapOutputs.size() -> " + - inMemoryMergedMapOutputs.size()); - } - - public synchronized void closeOnDiskFile(Path file) { - onDiskMapOutputs.add(file); - - if (onDiskMapOutputs.size() >= (2 * ioSortFactor - 1)) { - onDiskMerger.startMerge(onDiskMapOutputs); - } - } - - public RawKeyValueIterator close() throws Throwable { - // Wait for on-going merges to complete - if (memToMemMerger != null) { - memToMemMerger.close(); - } - inMemoryMerger.close(); - onDiskMerger.close(); - - List> memory = - new ArrayList>(inMemoryMergedMapOutputs); - memory.addAll(inMemoryMapOutputs); - List disk = new ArrayList(onDiskMapOutputs); - return finalMerge(jobConf, rfs, memory, disk); - } - - private class IntermediateMemoryToMemoryMerger - extends MergeThread, K, V> { - - public IntermediateMemoryToMemoryMerger(MergeManager manager, - int mergeFactor) { - super(manager, mergeFactor, exceptionReporter); - setName("InMemoryMerger - Thread to do in-memory merge of in-memory " + - "shuffled map-outputs"); - setDaemon(true); - } - - @Override - public void merge(List> inputs) throws IOException { - if (inputs == null || inputs.size() == 0) { - return; - } - - TaskAttemptID dummyMapId = inputs.get(0).getMapId(); - List> inMemorySegments = new ArrayList>(); - long mergeOutputSize = - createInMemorySegments(inputs, inMemorySegments, 0); - int noInMemorySegments = inMemorySegments.size(); - - MapOutput mergedMapOutputs = - unconditionalReserve(dummyMapId, mergeOutputSize, false); - - Writer writer = - new InMemoryWriter(mergedMapOutputs.getArrayStream()); - - LOG.info("Initiating Memory-to-Memory merge with " + noInMemorySegments + - " segments of total-size: " + mergeOutputSize); - - RawKeyValueIterator rIter = - Merger.merge(jobConf, rfs, - (Class)jobConf.getMapOutputKeyClass(), - (Class)jobConf.getMapOutputValueClass(), - inMemorySegments, inMemorySegments.size(), - new Path(reduceId.toString()), - (RawComparator)jobConf.getOutputKeyComparator(), - reporter, null, null, null); - Merger.writeFile(rIter, writer, reporter, jobConf); - writer.close(); - - LOG.info(reduceId + - " Memory-to-Memory merge of the " + noInMemorySegments + - " files in-memory complete."); - - // Note the output of the merge - closeInMemoryMergedFile(mergedMapOutputs); - } - } - - private class InMemoryMerger extends MergeThread, K,V> { - - public InMemoryMerger(MergeManager manager) { - super(manager, Integer.MAX_VALUE, exceptionReporter); - setName - ("InMemoryMerger - Thread to merge in-memory shuffled map-outputs"); - setDaemon(true); - } - - @Override - public void merge(List> inputs) throws IOException { - if (inputs == null || inputs.size() == 0) { - return; - } - - //name this output file same as the name of the first file that is - //there in the current list of inmem files (this 
is guaranteed to - //be absent on the disk currently. So we don't overwrite a prev. - //created spill). Also we need to create the output file now since - //it is not guaranteed that this file will be present after merge - //is called (we delete empty files as soon as we see them - //in the merge method) - - //figure out the mapId - TaskAttemptID mapId = inputs.get(0).getMapId(); - TaskID mapTaskId = mapId.getTaskID(); - - List> inMemorySegments = new ArrayList>(); - long mergeOutputSize = - createInMemorySegments(inputs, inMemorySegments,0); - int noInMemorySegments = inMemorySegments.size(); - - Path outputPath = - mapOutputFile.getInputFileForWrite(mapTaskId, - mergeOutputSize).suffix( - Task.MERGED_OUTPUT_PREFIX); - - Writer writer = - new Writer(jobConf, rfs, outputPath, - (Class) jobConf.getMapOutputKeyClass(), - (Class) jobConf.getMapOutputValueClass(), - codec, null); - - RawKeyValueIterator rIter = null; - try { - LOG.info("Initiating in-memory merge with " + noInMemorySegments + - " segments..."); - - rIter = Merger.merge(jobConf, rfs, - (Class)jobConf.getMapOutputKeyClass(), - (Class)jobConf.getMapOutputValueClass(), - inMemorySegments, inMemorySegments.size(), - new Path(reduceId.toString()), - (RawComparator)jobConf.getOutputKeyComparator(), - reporter, spilledRecordsCounter, null, null); - - if (null == combinerClass) { - Merger.writeFile(rIter, writer, reporter, jobConf); - } else { - combineCollector.setWriter(writer); - combineAndSpill(rIter, reduceCombineInputCounter); - } - writer.close(); - - LOG.info(reduceId + - " Merge of the " + noInMemorySegments + - " files in-memory complete." + - " Local file is " + outputPath + " of size " + - localFS.getFileStatus(outputPath).getLen()); - } catch (IOException e) { - //make sure that we delete the ondisk file that we created - //earlier when we invoked cloneFileAttributes - localFS.delete(outputPath, true); - throw e; - } - - // Note the output of the merge - closeOnDiskFile(outputPath); - } - - } - - private class OnDiskMerger extends MergeThread { - - public OnDiskMerger(MergeManager manager) { - super(manager, Integer.MAX_VALUE, exceptionReporter); - setName("OnDiskMerger - Thread to merge on-disk map-outputs"); - setDaemon(true); - } - - @Override - public void merge(List inputs) throws IOException { - // sanity check - if (inputs == null || inputs.isEmpty()) { - LOG.info("No ondisk files to merge..."); - return; - } - - long approxOutputSize = 0; - int bytesPerSum = - jobConf.getInt("io.bytes.per.checksum", 512); - - LOG.info("OnDiskMerger: We have " + inputs.size() + - " map outputs on disk. Triggering merge..."); - - // 1. Prepare the list of files to be merged. - for (Path file : inputs) { - approxOutputSize += localFS.getFileStatus(file).getLen(); - } - - // add the checksum length - approxOutputSize += - ChecksumFileSystem.getChecksumLength(approxOutputSize, bytesPerSum); - - // 2. 
Start the on-disk merge process - Path outputPath = - localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(), - approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX); - Writer writer = - new Writer(jobConf, rfs, outputPath, - (Class) jobConf.getMapOutputKeyClass(), - (Class) jobConf.getMapOutputValueClass(), - codec, null); - RawKeyValueIterator iter = null; - Path tmpDir = new Path(reduceId.toString()); - try { - iter = Merger.merge(jobConf, rfs, - (Class) jobConf.getMapOutputKeyClass(), - (Class) jobConf.getMapOutputValueClass(), - codec, inputs.toArray(new Path[inputs.size()]), - true, ioSortFactor, tmpDir, - (RawComparator) jobConf.getOutputKeyComparator(), - reporter, spilledRecordsCounter, null, - mergedMapOutputsCounter, null); - - Merger.writeFile(iter, writer, reporter, jobConf); - writer.close(); - } catch (IOException e) { - localFS.delete(outputPath, true); - throw e; - } - - closeOnDiskFile(outputPath); - - LOG.info(reduceId + - " Finished merging " + inputs.size() + - " map output files on disk of total-size " + - approxOutputSize + "." + - " Local output file is " + outputPath + " of size " + - localFS.getFileStatus(outputPath).getLen()); - } - } - - private void combineAndSpill( - RawKeyValueIterator kvIter, - Counters.Counter inCounter) throws IOException { - JobConf job = jobConf; - Reducer combiner = ReflectionUtils.newInstance(combinerClass, job); - Class keyClass = (Class) job.getMapOutputKeyClass(); - Class valClass = (Class) job.getMapOutputValueClass(); - RawComparator comparator = - (RawComparator)job.getOutputKeyComparator(); - try { - CombineValuesIterator values = new CombineValuesIterator( - kvIter, comparator, keyClass, valClass, job, Reporter.NULL, - inCounter); - while (values.more()) { - combiner.reduce(values.getKey(), values, combineCollector, - Reporter.NULL); - values.nextKey(); - } - } finally { - combiner.close(); - } - } - - private long createInMemorySegments(List> inMemoryMapOutputs, - List> inMemorySegments, - long leaveBytes - ) throws IOException { - long totalSize = 0L; - // We could use fullSize could come from the RamManager, but files can be - // closed but not yet present in inMemoryMapOutputs - long fullSize = 0L; - for (MapOutput mo : inMemoryMapOutputs) { - fullSize += mo.getMemory().length; - } - while(fullSize > leaveBytes) { - MapOutput mo = inMemoryMapOutputs.remove(0); - byte[] data = mo.getMemory(); - long size = data.length; - totalSize += size; - fullSize -= size; - Reader reader = new InMemoryReader(MergeManager.this, - mo.getMapId(), - data, 0, (int)size); - inMemorySegments.add(new Segment(reader, true, - (mo.isPrimaryMapOutput() ? 
- mergedMapOutputsCounter : null))); - } - return totalSize; - } - - class RawKVIteratorReader extends IFile.Reader { - - private final RawKeyValueIterator kvIter; - - public RawKVIteratorReader(RawKeyValueIterator kvIter, long size) - throws IOException { - super(null, null, size, null, spilledRecordsCounter); - this.kvIter = kvIter; - } - public boolean nextRawKey(DataInputBuffer key) throws IOException { - if (kvIter.next()) { - final DataInputBuffer kb = kvIter.getKey(); - final int kp = kb.getPosition(); - final int klen = kb.getLength() - kp; - key.reset(kb.getData(), kp, klen); - bytesRead += klen; - return true; - } - return false; - } - public void nextRawValue(DataInputBuffer value) throws IOException { - final DataInputBuffer vb = kvIter.getValue(); - final int vp = vb.getPosition(); - final int vlen = vb.getLength() - vp; - value.reset(vb.getData(), vp, vlen); - bytesRead += vlen; - } - public long getPosition() throws IOException { - return bytesRead; - } - - public void close() throws IOException { - kvIter.close(); - } - } - - private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs, - List> inMemoryMapOutputs, - List onDiskMapOutputs - ) throws IOException { - LOG.info("finalMerge called with " + - inMemoryMapOutputs.size() + " in-memory map-outputs and " + - onDiskMapOutputs.size() + " on-disk map-outputs"); - - final float maxRedPer = - job.getFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 0f); - if (maxRedPer > 1.0 || maxRedPer < 0.0) { - throw new IOException(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT + - maxRedPer); - } - int maxInMemReduce = (int)Math.min( - Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE); - - - // merge config params - Class keyClass = (Class)job.getMapOutputKeyClass(); - Class valueClass = (Class)job.getMapOutputValueClass(); - boolean keepInputs = job.getKeepFailedTaskFiles(); - final Path tmpDir = new Path(reduceId.toString()); - final RawComparator comparator = - (RawComparator)job.getOutputKeyComparator(); - - // segments required to vacate memory - List> memDiskSegments = new ArrayList>(); - long inMemToDiskBytes = 0; - boolean mergePhaseFinished = false; - if (inMemoryMapOutputs.size() > 0) { - TaskID mapId = inMemoryMapOutputs.get(0).getMapId().getTaskID(); - inMemToDiskBytes = createInMemorySegments(inMemoryMapOutputs, - memDiskSegments, - maxInMemReduce); - final int numMemDiskSegments = memDiskSegments.size(); - if (numMemDiskSegments > 0 && - ioSortFactor > onDiskMapOutputs.size()) { - - // If we reach here, it implies that we have less than io.sort.factor - // disk segments and this will be incremented by 1 (result of the - // memory segments merge). Since this total would still be - // <= io.sort.factor, we will not do any more intermediate merges, - // the merge of all these disk segments would be directly fed to the - // reduce method - - mergePhaseFinished = true; - // must spill to disk, but can't retain in-mem for intermediate merge - final Path outputPath = - mapOutputFile.getInputFileForWrite(mapId, - inMemToDiskBytes).suffix( - Task.MERGED_OUTPUT_PREFIX); - final RawKeyValueIterator rIter = Merger.merge(job, fs, - keyClass, valueClass, memDiskSegments, numMemDiskSegments, - tmpDir, comparator, reporter, spilledRecordsCounter, null, - mergePhase); - final Writer writer = new Writer(job, fs, outputPath, - keyClass, valueClass, codec, null); - try { - Merger.writeFile(rIter, writer, reporter, job); - // add to list of final disk outputs. 
- onDiskMapOutputs.add(outputPath); - } catch (IOException e) { - if (null != outputPath) { - try { - fs.delete(outputPath, true); - } catch (IOException ie) { - // NOTHING - } - } - throw e; - } finally { - if (null != writer) { - writer.close(); - } - } - LOG.info("Merged " + numMemDiskSegments + " segments, " + - inMemToDiskBytes + " bytes to disk to satisfy " + - "reduce memory limit"); - inMemToDiskBytes = 0; - memDiskSegments.clear(); - } else if (inMemToDiskBytes != 0) { - LOG.info("Keeping " + numMemDiskSegments + " segments, " + - inMemToDiskBytes + " bytes in memory for " + - "intermediate, on-disk merge"); - } - } - - // segments on disk - List> diskSegments = new ArrayList>(); - long onDiskBytes = inMemToDiskBytes; - Path[] onDisk = onDiskMapOutputs.toArray(new Path[onDiskMapOutputs.size()]); - for (Path file : onDisk) { - onDiskBytes += fs.getFileStatus(file).getLen(); - LOG.debug("Disk file: " + file + " Length is " + - fs.getFileStatus(file).getLen()); - diskSegments.add(new Segment(job, fs, file, codec, keepInputs, - (file.toString().endsWith( - Task.MERGED_OUTPUT_PREFIX) ? - null : mergedMapOutputsCounter) - )); - } - LOG.info("Merging " + onDisk.length + " files, " + - onDiskBytes + " bytes from disk"); - Collections.sort(diskSegments, new Comparator>() { - public int compare(Segment o1, Segment o2) { - if (o1.getLength() == o2.getLength()) { - return 0; - } - return o1.getLength() < o2.getLength() ? -1 : 1; - } - }); - - // build final list of segments from merged backed by disk + in-mem - List> finalSegments = new ArrayList>(); - long inMemBytes = createInMemorySegments(inMemoryMapOutputs, - finalSegments, 0); - LOG.info("Merging " + finalSegments.size() + " segments, " + - inMemBytes + " bytes from memory into reduce"); - if (0 != onDiskBytes) { - final int numInMemSegments = memDiskSegments.size(); - diskSegments.addAll(0, memDiskSegments); - memDiskSegments.clear(); - // Pass mergePhase only if there is a going to be intermediate - // merges. See comment where mergePhaseFinished is being set - Progress thisPhase = (mergePhaseFinished) ? null : mergePhase; - RawKeyValueIterator diskMerge = Merger.merge( - job, fs, keyClass, valueClass, diskSegments, - ioSortFactor, numInMemSegments, tmpDir, comparator, - reporter, false, spilledRecordsCounter, null, thisPhase); - diskSegments.clear(); - if (0 == finalSegments.size()) { - return diskMerge; - } - finalSegments.add(new Segment( - new RawKVIteratorReader(diskMerge, onDiskBytes), true)); - } - return Merger.merge(job, fs, keyClass, valueClass, - finalSegments, finalSegments.size(), tmpDir, - comparator, reporter, spilledRecordsCounter, null, - null); - - } + public RawKeyValueIterator close() throws Throwable; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java new file mode 100644 index 00000000000..007897f17f0 --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeManagerImpl.java @@ -0,0 +1,797 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.mapreduce.task.reduce; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.List; +import java.util.Set; +import java.util.TreeSet; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; +import org.apache.hadoop.fs.ChecksumFileSystem; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.LocalDirAllocator; +import org.apache.hadoop.fs.LocalFileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.io.DataInputBuffer; +import org.apache.hadoop.io.RawComparator; +import org.apache.hadoop.io.compress.CompressionCodec; +import org.apache.hadoop.mapred.Counters; +import org.apache.hadoop.mapred.IFile; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.MapOutputFile; +import org.apache.hadoop.mapred.Merger; +import org.apache.hadoop.mapred.RawKeyValueIterator; +import org.apache.hadoop.mapred.Reducer; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.Task; +import org.apache.hadoop.mapred.IFile.Reader; +import org.apache.hadoop.mapred.IFile.Writer; +import org.apache.hadoop.mapred.Merger.Segment; +import org.apache.hadoop.mapred.Task.CombineOutputCollector; +import org.apache.hadoop.mapred.Task.CombineValuesIterator; +import org.apache.hadoop.mapreduce.MRJobConfig; +import org.apache.hadoop.mapreduce.TaskAttemptID; +import org.apache.hadoop.mapreduce.TaskID; +import org.apache.hadoop.mapreduce.task.reduce.MapOutput.MapOutputComparator; +import org.apache.hadoop.util.Progress; +import org.apache.hadoop.util.ReflectionUtils; + +import com.google.common.annotations.VisibleForTesting; + +@SuppressWarnings(value={"unchecked"}) +@InterfaceAudience.LimitedPrivate({"MapReduce"}) +@InterfaceStability.Unstable +public class MergeManagerImpl implements MergeManager { + + private static final Log LOG = LogFactory.getLog(MergeManagerImpl.class); + + /* Maximum percentage of the in-memory limit that a single shuffle can + * consume*/ + private static final float DEFAULT_SHUFFLE_MEMORY_LIMIT_PERCENT + = 0.25f; + + private final TaskAttemptID reduceId; + + private final JobConf jobConf; + private final FileSystem localFS; + private final FileSystem rfs; + private final LocalDirAllocator localDirAllocator; + + protected MapOutputFile mapOutputFile; + + Set> inMemoryMergedMapOutputs = + new TreeSet>(new MapOutputComparator()); + private IntermediateMemoryToMemoryMerger memToMemMerger; + + Set> inMemoryMapOutputs = + new TreeSet>(new MapOutputComparator()); + private final MergeThread, K,V> inMemoryMerger; + + Set onDiskMapOutputs = new TreeSet(); + private final OnDiskMerger onDiskMerger; + + private final long memoryLimit; + private long usedMemory; + private long commitMemory; + private 
final long maxSingleShuffleLimit; + + private final int memToMemMergeOutputsThreshold; + private final long mergeThreshold; + + private final int ioSortFactor; + + private final Reporter reporter; + private final ExceptionReporter exceptionReporter; + + /** + * Combiner class to run during in-memory merge, if defined. + */ + private final Class combinerClass; + + /** + * Resettable collector used for combine. + */ + private final CombineOutputCollector combineCollector; + + private final Counters.Counter spilledRecordsCounter; + + private final Counters.Counter reduceCombineInputCounter; + + private final Counters.Counter mergedMapOutputsCounter; + + private final CompressionCodec codec; + + private final Progress mergePhase; + + public MergeManagerImpl(TaskAttemptID reduceId, JobConf jobConf, + FileSystem localFS, + LocalDirAllocator localDirAllocator, + Reporter reporter, + CompressionCodec codec, + Class combinerClass, + CombineOutputCollector combineCollector, + Counters.Counter spilledRecordsCounter, + Counters.Counter reduceCombineInputCounter, + Counters.Counter mergedMapOutputsCounter, + ExceptionReporter exceptionReporter, + Progress mergePhase, MapOutputFile mapOutputFile) { + this.reduceId = reduceId; + this.jobConf = jobConf; + this.localDirAllocator = localDirAllocator; + this.exceptionReporter = exceptionReporter; + + this.reporter = reporter; + this.codec = codec; + this.combinerClass = combinerClass; + this.combineCollector = combineCollector; + this.reduceCombineInputCounter = reduceCombineInputCounter; + this.spilledRecordsCounter = spilledRecordsCounter; + this.mergedMapOutputsCounter = mergedMapOutputsCounter; + this.mapOutputFile = mapOutputFile; + this.mapOutputFile.setConf(jobConf); + + this.localFS = localFS; + this.rfs = ((LocalFileSystem)localFS).getRaw(); + + final float maxInMemCopyUse = + jobConf.getFloat(MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT, 0.90f); + if (maxInMemCopyUse > 1.0 || maxInMemCopyUse < 0.0) { + throw new IllegalArgumentException("Invalid value for " + + MRJobConfig.SHUFFLE_INPUT_BUFFER_PERCENT + ": " + + maxInMemCopyUse); + } + + // Allow unit tests to fix Runtime memory + this.memoryLimit = + (long)(jobConf.getLong(MRJobConfig.REDUCE_MEMORY_TOTAL_BYTES, + Math.min(Runtime.getRuntime().maxMemory(), Integer.MAX_VALUE)) + * maxInMemCopyUse); + + this.ioSortFactor = jobConf.getInt(MRJobConfig.IO_SORT_FACTOR, 100); + + final float singleShuffleMemoryLimitPercent = + jobConf.getFloat(MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT, + DEFAULT_SHUFFLE_MEMORY_LIMIT_PERCENT); + if (singleShuffleMemoryLimitPercent <= 0.0f + || singleShuffleMemoryLimitPercent > 1.0f) { + throw new IllegalArgumentException("Invalid value for " + + MRJobConfig.SHUFFLE_MEMORY_LIMIT_PERCENT + ": " + + singleShuffleMemoryLimitPercent); + } + + usedMemory = 0L; + commitMemory = 0L; + this.maxSingleShuffleLimit = + (long)(memoryLimit * singleShuffleMemoryLimitPercent); + this.memToMemMergeOutputsThreshold = + jobConf.getInt(MRJobConfig.REDUCE_MEMTOMEM_THRESHOLD, ioSortFactor); + this.mergeThreshold = (long)(this.memoryLimit * + jobConf.getFloat(MRJobConfig.SHUFFLE_MERGE_PERCENT, + 0.90f)); + LOG.info("MergerManager: memoryLimit=" + memoryLimit + ", " + + "maxSingleShuffleLimit=" + maxSingleShuffleLimit + ", " + + "mergeThreshold=" + mergeThreshold + ", " + + "ioSortFactor=" + ioSortFactor + ", " + + "memToMemMergeOutputsThreshold=" + memToMemMergeOutputsThreshold); + + if (this.maxSingleShuffleLimit >= this.mergeThreshold) { + throw new RuntimeException("Invlaid configuration: " + + 
"maxSingleShuffleLimit should be less than mergeThreshold" + + "maxSingleShuffleLimit: " + this.maxSingleShuffleLimit + + "mergeThreshold: " + this.mergeThreshold); + } + + boolean allowMemToMemMerge = + jobConf.getBoolean(MRJobConfig.REDUCE_MEMTOMEM_ENABLED, false); + if (allowMemToMemMerge) { + this.memToMemMerger = + new IntermediateMemoryToMemoryMerger(this, + memToMemMergeOutputsThreshold); + this.memToMemMerger.start(); + } else { + this.memToMemMerger = null; + } + + this.inMemoryMerger = createInMemoryMerger(); + this.inMemoryMerger.start(); + + this.onDiskMerger = new OnDiskMerger(this); + this.onDiskMerger.start(); + + this.mergePhase = mergePhase; + } + + protected MergeThread, K,V> createInMemoryMerger() { + return new InMemoryMerger(this); + } + + TaskAttemptID getReduceId() { + return reduceId; + } + + @VisibleForTesting + ExceptionReporter getExceptionReporter() { + return exceptionReporter; + } + + @Override + public void waitForResource() throws InterruptedException { + inMemoryMerger.waitForMerge(); + } + + private boolean canShuffleToMemory(long requestedSize) { + return (requestedSize < maxSingleShuffleLimit); + } + + @Override + public synchronized MapOutput reserve(TaskAttemptID mapId, + long requestedSize, + int fetcher + ) throws IOException { + if (!canShuffleToMemory(requestedSize)) { + LOG.info(mapId + ": Shuffling to disk since " + requestedSize + + " is greater than maxSingleShuffleLimit (" + + maxSingleShuffleLimit + ")"); + return new OnDiskMapOutput(mapId, reduceId, this, requestedSize, + jobConf, mapOutputFile, fetcher, true); + } + + // Stall shuffle if we are above the memory limit + + // It is possible that all threads could just be stalling and not make + // progress at all. This could happen when: + // + // requested size is causing the used memory to go above limit && + // requested size < singleShuffleLimit && + // current used size < mergeThreshold (merge will not get triggered) + // + // To avoid this from happening, we allow exactly one thread to go past + // the memory limit. We check (usedMemory > memoryLimit) and not + // (usedMemory + requestedSize > memoryLimit). When this thread is done + // fetching, this will automatically trigger a merge thereby unlocking + // all the stalled threads + + if (usedMemory > memoryLimit) { + LOG.debug(mapId + ": Stalling shuffle since usedMemory (" + usedMemory + + ") is greater than memoryLimit (" + memoryLimit + ")." + + " CommitMemory is (" + commitMemory + ")"); + return null; + } + + // Allow the in-memory shuffle to progress + LOG.debug(mapId + ": Proceeding with shuffle since usedMemory (" + + usedMemory + ") is lesser than memoryLimit (" + memoryLimit + ")." 
+ + "CommitMemory is (" + commitMemory + ")"); + return unconditionalReserve(mapId, requestedSize, true); + } + + /** + * Unconditional Reserve is used by the Memory-to-Memory thread + * @return + */ + private synchronized InMemoryMapOutput unconditionalReserve( + TaskAttemptID mapId, long requestedSize, boolean primaryMapOutput) { + usedMemory += requestedSize; + return new InMemoryMapOutput(jobConf, mapId, this, (int)requestedSize, + codec, primaryMapOutput); + } + + synchronized void unreserve(long size) { + usedMemory -= size; + } + + public synchronized void closeInMemoryFile(InMemoryMapOutput mapOutput) { + inMemoryMapOutputs.add(mapOutput); + LOG.info("closeInMemoryFile -> map-output of size: " + mapOutput.getSize() + + ", inMemoryMapOutputs.size() -> " + inMemoryMapOutputs.size() + + ", commitMemory -> " + commitMemory + ", usedMemory ->" + usedMemory); + + commitMemory+= mapOutput.getSize(); + + // Can hang if mergeThreshold is really low. + if (commitMemory >= mergeThreshold) { + LOG.info("Starting inMemoryMerger's merge since commitMemory=" + + commitMemory + " > mergeThreshold=" + mergeThreshold + + ". Current usedMemory=" + usedMemory); + inMemoryMapOutputs.addAll(inMemoryMergedMapOutputs); + inMemoryMergedMapOutputs.clear(); + inMemoryMerger.startMerge(inMemoryMapOutputs); + commitMemory = 0L; // Reset commitMemory. + } + + if (memToMemMerger != null) { + if (inMemoryMapOutputs.size() >= memToMemMergeOutputsThreshold) { + memToMemMerger.startMerge(inMemoryMapOutputs); + } + } + } + + + public synchronized void closeInMemoryMergedFile(InMemoryMapOutput mapOutput) { + inMemoryMergedMapOutputs.add(mapOutput); + LOG.info("closeInMemoryMergedFile -> size: " + mapOutput.getSize() + + ", inMemoryMergedMapOutputs.size() -> " + + inMemoryMergedMapOutputs.size()); + } + + public synchronized void closeOnDiskFile(Path file) { + onDiskMapOutputs.add(file); + + if (onDiskMapOutputs.size() >= (2 * ioSortFactor - 1)) { + onDiskMerger.startMerge(onDiskMapOutputs); + } + } + + @Override + public RawKeyValueIterator close() throws Throwable { + // Wait for on-going merges to complete + if (memToMemMerger != null) { + memToMemMerger.close(); + } + inMemoryMerger.close(); + onDiskMerger.close(); + + List> memory = + new ArrayList>(inMemoryMergedMapOutputs); + memory.addAll(inMemoryMapOutputs); + List disk = new ArrayList(onDiskMapOutputs); + return finalMerge(jobConf, rfs, memory, disk); + } + + private class IntermediateMemoryToMemoryMerger + extends MergeThread, K, V> { + + public IntermediateMemoryToMemoryMerger(MergeManagerImpl manager, + int mergeFactor) { + super(manager, mergeFactor, exceptionReporter); + setName("InMemoryMerger - Thread to do in-memory merge of in-memory " + + "shuffled map-outputs"); + setDaemon(true); + } + + @Override + public void merge(List> inputs) throws IOException { + if (inputs == null || inputs.size() == 0) { + return; + } + + TaskAttemptID dummyMapId = inputs.get(0).getMapId(); + List> inMemorySegments = new ArrayList>(); + long mergeOutputSize = + createInMemorySegments(inputs, inMemorySegments, 0); + int noInMemorySegments = inMemorySegments.size(); + + InMemoryMapOutput mergedMapOutputs = + unconditionalReserve(dummyMapId, mergeOutputSize, false); + + Writer writer = + new InMemoryWriter(mergedMapOutputs.getArrayStream()); + + LOG.info("Initiating Memory-to-Memory merge with " + noInMemorySegments + + " segments of total-size: " + mergeOutputSize); + + RawKeyValueIterator rIter = + Merger.merge(jobConf, rfs, + (Class)jobConf.getMapOutputKeyClass(), + 
(Class)jobConf.getMapOutputValueClass(), + inMemorySegments, inMemorySegments.size(), + new Path(reduceId.toString()), + (RawComparator)jobConf.getOutputKeyComparator(), + reporter, null, null, null); + Merger.writeFile(rIter, writer, reporter, jobConf); + writer.close(); + + LOG.info(reduceId + + " Memory-to-Memory merge of the " + noInMemorySegments + + " files in-memory complete."); + + // Note the output of the merge + closeInMemoryMergedFile(mergedMapOutputs); + } + } + + private class InMemoryMerger extends MergeThread, K,V> { + + public InMemoryMerger(MergeManagerImpl manager) { + super(manager, Integer.MAX_VALUE, exceptionReporter); + setName + ("InMemoryMerger - Thread to merge in-memory shuffled map-outputs"); + setDaemon(true); + } + + @Override + public void merge(List> inputs) throws IOException { + if (inputs == null || inputs.size() == 0) { + return; + } + + //name this output file same as the name of the first file that is + //there in the current list of inmem files (this is guaranteed to + //be absent on the disk currently. So we don't overwrite a prev. + //created spill). Also we need to create the output file now since + //it is not guaranteed that this file will be present after merge + //is called (we delete empty files as soon as we see them + //in the merge method) + + //figure out the mapId + TaskAttemptID mapId = inputs.get(0).getMapId(); + TaskID mapTaskId = mapId.getTaskID(); + + List> inMemorySegments = new ArrayList>(); + long mergeOutputSize = + createInMemorySegments(inputs, inMemorySegments,0); + int noInMemorySegments = inMemorySegments.size(); + + Path outputPath = + mapOutputFile.getInputFileForWrite(mapTaskId, + mergeOutputSize).suffix( + Task.MERGED_OUTPUT_PREFIX); + + Writer writer = + new Writer(jobConf, rfs, outputPath, + (Class) jobConf.getMapOutputKeyClass(), + (Class) jobConf.getMapOutputValueClass(), + codec, null); + + RawKeyValueIterator rIter = null; + try { + LOG.info("Initiating in-memory merge with " + noInMemorySegments + + " segments..."); + + rIter = Merger.merge(jobConf, rfs, + (Class)jobConf.getMapOutputKeyClass(), + (Class)jobConf.getMapOutputValueClass(), + inMemorySegments, inMemorySegments.size(), + new Path(reduceId.toString()), + (RawComparator)jobConf.getOutputKeyComparator(), + reporter, spilledRecordsCounter, null, null); + + if (null == combinerClass) { + Merger.writeFile(rIter, writer, reporter, jobConf); + } else { + combineCollector.setWriter(writer); + combineAndSpill(rIter, reduceCombineInputCounter); + } + writer.close(); + + LOG.info(reduceId + + " Merge of the " + noInMemorySegments + + " files in-memory complete." 
+ + " Local file is " + outputPath + " of size " + + localFS.getFileStatus(outputPath).getLen()); + } catch (IOException e) { + //make sure that we delete the ondisk file that we created + //earlier when we invoked cloneFileAttributes + localFS.delete(outputPath, true); + throw e; + } + + // Note the output of the merge + closeOnDiskFile(outputPath); + } + + } + + private class OnDiskMerger extends MergeThread { + + public OnDiskMerger(MergeManagerImpl manager) { + super(manager, Integer.MAX_VALUE, exceptionReporter); + setName("OnDiskMerger - Thread to merge on-disk map-outputs"); + setDaemon(true); + } + + @Override + public void merge(List inputs) throws IOException { + // sanity check + if (inputs == null || inputs.isEmpty()) { + LOG.info("No ondisk files to merge..."); + return; + } + + long approxOutputSize = 0; + int bytesPerSum = + jobConf.getInt("io.bytes.per.checksum", 512); + + LOG.info("OnDiskMerger: We have " + inputs.size() + + " map outputs on disk. Triggering merge..."); + + // 1. Prepare the list of files to be merged. + for (Path file : inputs) { + approxOutputSize += localFS.getFileStatus(file).getLen(); + } + + // add the checksum length + approxOutputSize += + ChecksumFileSystem.getChecksumLength(approxOutputSize, bytesPerSum); + + // 2. Start the on-disk merge process + Path outputPath = + localDirAllocator.getLocalPathForWrite(inputs.get(0).toString(), + approxOutputSize, jobConf).suffix(Task.MERGED_OUTPUT_PREFIX); + Writer writer = + new Writer(jobConf, rfs, outputPath, + (Class) jobConf.getMapOutputKeyClass(), + (Class) jobConf.getMapOutputValueClass(), + codec, null); + RawKeyValueIterator iter = null; + Path tmpDir = new Path(reduceId.toString()); + try { + iter = Merger.merge(jobConf, rfs, + (Class) jobConf.getMapOutputKeyClass(), + (Class) jobConf.getMapOutputValueClass(), + codec, inputs.toArray(new Path[inputs.size()]), + true, ioSortFactor, tmpDir, + (RawComparator) jobConf.getOutputKeyComparator(), + reporter, spilledRecordsCounter, null, + mergedMapOutputsCounter, null); + + Merger.writeFile(iter, writer, reporter, jobConf); + writer.close(); + } catch (IOException e) { + localFS.delete(outputPath, true); + throw e; + } + + closeOnDiskFile(outputPath); + + LOG.info(reduceId + + " Finished merging " + inputs.size() + + " map output files on disk of total-size " + + approxOutputSize + "." 
+ + " Local output file is " + outputPath + " of size " + + localFS.getFileStatus(outputPath).getLen()); + } + } + + private void combineAndSpill( + RawKeyValueIterator kvIter, + Counters.Counter inCounter) throws IOException { + JobConf job = jobConf; + Reducer combiner = ReflectionUtils.newInstance(combinerClass, job); + Class keyClass = (Class) job.getMapOutputKeyClass(); + Class valClass = (Class) job.getMapOutputValueClass(); + RawComparator comparator = + (RawComparator)job.getOutputKeyComparator(); + try { + CombineValuesIterator values = new CombineValuesIterator( + kvIter, comparator, keyClass, valClass, job, Reporter.NULL, + inCounter); + while (values.more()) { + combiner.reduce(values.getKey(), values, combineCollector, + Reporter.NULL); + values.nextKey(); + } + } finally { + combiner.close(); + } + } + + private long createInMemorySegments(List> inMemoryMapOutputs, + List> inMemorySegments, + long leaveBytes + ) throws IOException { + long totalSize = 0L; + // We could use fullSize could come from the RamManager, but files can be + // closed but not yet present in inMemoryMapOutputs + long fullSize = 0L; + for (InMemoryMapOutput mo : inMemoryMapOutputs) { + fullSize += mo.getMemory().length; + } + while(fullSize > leaveBytes) { + InMemoryMapOutput mo = inMemoryMapOutputs.remove(0); + byte[] data = mo.getMemory(); + long size = data.length; + totalSize += size; + fullSize -= size; + Reader reader = new InMemoryReader(MergeManagerImpl.this, + mo.getMapId(), + data, 0, (int)size); + inMemorySegments.add(new Segment(reader, true, + (mo.isPrimaryMapOutput() ? + mergedMapOutputsCounter : null))); + } + return totalSize; + } + + class RawKVIteratorReader extends IFile.Reader { + + private final RawKeyValueIterator kvIter; + + public RawKVIteratorReader(RawKeyValueIterator kvIter, long size) + throws IOException { + super(null, null, size, null, spilledRecordsCounter); + this.kvIter = kvIter; + } + public boolean nextRawKey(DataInputBuffer key) throws IOException { + if (kvIter.next()) { + final DataInputBuffer kb = kvIter.getKey(); + final int kp = kb.getPosition(); + final int klen = kb.getLength() - kp; + key.reset(kb.getData(), kp, klen); + bytesRead += klen; + return true; + } + return false; + } + public void nextRawValue(DataInputBuffer value) throws IOException { + final DataInputBuffer vb = kvIter.getValue(); + final int vp = vb.getPosition(); + final int vlen = vb.getLength() - vp; + value.reset(vb.getData(), vp, vlen); + bytesRead += vlen; + } + public long getPosition() throws IOException { + return bytesRead; + } + + public void close() throws IOException { + kvIter.close(); + } + } + + private RawKeyValueIterator finalMerge(JobConf job, FileSystem fs, + List> inMemoryMapOutputs, + List onDiskMapOutputs + ) throws IOException { + LOG.info("finalMerge called with " + + inMemoryMapOutputs.size() + " in-memory map-outputs and " + + onDiskMapOutputs.size() + " on-disk map-outputs"); + + final float maxRedPer = + job.getFloat(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT, 0f); + if (maxRedPer > 1.0 || maxRedPer < 0.0) { + throw new IOException(MRJobConfig.REDUCE_INPUT_BUFFER_PERCENT + + maxRedPer); + } + int maxInMemReduce = (int)Math.min( + Runtime.getRuntime().maxMemory() * maxRedPer, Integer.MAX_VALUE); + + + // merge config params + Class keyClass = (Class)job.getMapOutputKeyClass(); + Class valueClass = (Class)job.getMapOutputValueClass(); + boolean keepInputs = job.getKeepFailedTaskFiles(); + final Path tmpDir = new Path(reduceId.toString()); + final RawComparator comparator 
= + (RawComparator)job.getOutputKeyComparator(); + + // segments required to vacate memory + List> memDiskSegments = new ArrayList>(); + long inMemToDiskBytes = 0; + boolean mergePhaseFinished = false; + if (inMemoryMapOutputs.size() > 0) { + TaskID mapId = inMemoryMapOutputs.get(0).getMapId().getTaskID(); + inMemToDiskBytes = createInMemorySegments(inMemoryMapOutputs, + memDiskSegments, + maxInMemReduce); + final int numMemDiskSegments = memDiskSegments.size(); + if (numMemDiskSegments > 0 && + ioSortFactor > onDiskMapOutputs.size()) { + + // If we reach here, it implies that we have less than io.sort.factor + // disk segments and this will be incremented by 1 (result of the + // memory segments merge). Since this total would still be + // <= io.sort.factor, we will not do any more intermediate merges, + // the merge of all these disk segments would be directly fed to the + // reduce method + + mergePhaseFinished = true; + // must spill to disk, but can't retain in-mem for intermediate merge + final Path outputPath = + mapOutputFile.getInputFileForWrite(mapId, + inMemToDiskBytes).suffix( + Task.MERGED_OUTPUT_PREFIX); + final RawKeyValueIterator rIter = Merger.merge(job, fs, + keyClass, valueClass, memDiskSegments, numMemDiskSegments, + tmpDir, comparator, reporter, spilledRecordsCounter, null, + mergePhase); + final Writer writer = new Writer(job, fs, outputPath, + keyClass, valueClass, codec, null); + try { + Merger.writeFile(rIter, writer, reporter, job); + // add to list of final disk outputs. + onDiskMapOutputs.add(outputPath); + } catch (IOException e) { + if (null != outputPath) { + try { + fs.delete(outputPath, true); + } catch (IOException ie) { + // NOTHING + } + } + throw e; + } finally { + if (null != writer) { + writer.close(); + } + } + LOG.info("Merged " + numMemDiskSegments + " segments, " + + inMemToDiskBytes + " bytes to disk to satisfy " + + "reduce memory limit"); + inMemToDiskBytes = 0; + memDiskSegments.clear(); + } else if (inMemToDiskBytes != 0) { + LOG.info("Keeping " + numMemDiskSegments + " segments, " + + inMemToDiskBytes + " bytes in memory for " + + "intermediate, on-disk merge"); + } + } + + // segments on disk + List> diskSegments = new ArrayList>(); + long onDiskBytes = inMemToDiskBytes; + Path[] onDisk = onDiskMapOutputs.toArray(new Path[onDiskMapOutputs.size()]); + for (Path file : onDisk) { + onDiskBytes += fs.getFileStatus(file).getLen(); + LOG.debug("Disk file: " + file + " Length is " + + fs.getFileStatus(file).getLen()); + diskSegments.add(new Segment(job, fs, file, codec, keepInputs, + (file.toString().endsWith( + Task.MERGED_OUTPUT_PREFIX) ? + null : mergedMapOutputsCounter) + )); + } + LOG.info("Merging " + onDisk.length + " files, " + + onDiskBytes + " bytes from disk"); + Collections.sort(diskSegments, new Comparator>() { + public int compare(Segment o1, Segment o2) { + if (o1.getLength() == o2.getLength()) { + return 0; + } + return o1.getLength() < o2.getLength() ? -1 : 1; + } + }); + + // build final list of segments from merged backed by disk + in-mem + List> finalSegments = new ArrayList>(); + long inMemBytes = createInMemorySegments(inMemoryMapOutputs, + finalSegments, 0); + LOG.info("Merging " + finalSegments.size() + " segments, " + + inMemBytes + " bytes from memory into reduce"); + if (0 != onDiskBytes) { + final int numInMemSegments = memDiskSegments.size(); + diskSegments.addAll(0, memDiskSegments); + memDiskSegments.clear(); + // Pass mergePhase only if there is a going to be intermediate + // merges. 
See comment where mergePhaseFinished is being set + Progress thisPhase = (mergePhaseFinished) ? null : mergePhase; + RawKeyValueIterator diskMerge = Merger.merge( + job, fs, keyClass, valueClass, diskSegments, + ioSortFactor, numInMemSegments, tmpDir, comparator, + reporter, false, spilledRecordsCounter, null, thisPhase); + diskSegments.clear(); + if (0 == finalSegments.size()) { + return diskMerge; + } + finalSegments.add(new Segment( + new RawKVIteratorReader(diskMerge, onDiskBytes), true)); + } + return Merger.merge(job, fs, keyClass, valueClass, + finalSegments, finalSegments.size(), tmpDir, + comparator, reporter, spilledRecordsCounter, null, + null); + + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java index 568f4e6ffec..5db353f99c2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/MergeThread.java @@ -34,12 +34,12 @@ abstract class MergeThread extends Thread { private AtomicInteger numPending = new AtomicInteger(0); private LinkedList> pendingToBeMerged; - protected final MergeManager manager; + protected final MergeManagerImpl manager; private final ExceptionReporter reporter; private boolean closed = false; private final int mergeFactor; - public MergeThread(MergeManager manager, int mergeFactor, + public MergeThread(MergeManagerImpl manager, int mergeFactor, ExceptionReporter reporter) { this.pendingToBeMerged = new LinkedList>(); this.manager = manager; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java new file mode 100644 index 00000000000..2cb86449e5d --- /dev/null +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/OnDiskMapOutput.java @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.mapreduce.task.reduce; + +import java.io.IOException; +import java.io.InputStream; +import java.io.OutputStream; + +import org.apache.hadoop.classification.InterfaceAudience; +import org.apache.hadoop.classification.InterfaceStability; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; + +import org.apache.hadoop.io.IOUtils; + +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapred.MapOutputFile; + +import org.apache.hadoop.mapreduce.TaskAttemptID; + +@InterfaceAudience.Private +@InterfaceStability.Unstable +class OnDiskMapOutput extends MapOutput { + private static final Log LOG = LogFactory.getLog(OnDiskMapOutput.class); + private final FileSystem localFS; + private final Path tmpOutputPath; + private final Path outputPath; + private final MergeManagerImpl merger; + private final OutputStream disk; + + public OnDiskMapOutput(TaskAttemptID mapId, TaskAttemptID reduceId, + MergeManagerImpl merger, long size, + JobConf conf, + MapOutputFile mapOutputFile, + int fetcher, boolean primaryMapOutput) + throws IOException { + super(mapId, size, primaryMapOutput); + this.merger = merger; + this.localFS = FileSystem.getLocal(conf); + outputPath = + mapOutputFile.getInputFileForWrite(mapId.getTaskID(),size); + tmpOutputPath = outputPath.suffix(String.valueOf(fetcher)); + + disk = localFS.create(tmpOutputPath); + + } + + @Override + public void shuffle(MapHost host, InputStream input, + long compressedLength, long decompressedLength, + ShuffleClientMetrics metrics, + Reporter reporter) throws IOException { + // Copy data to local-disk + long bytesLeft = compressedLength; + try { + final int BYTES_TO_READ = 64 * 1024; + byte[] buf = new byte[BYTES_TO_READ]; + while (bytesLeft > 0) { + int n = input.read(buf, 0, (int) Math.min(bytesLeft, BYTES_TO_READ)); + if (n < 0) { + throw new IOException("read past end of stream reading " + + getMapId()); + } + disk.write(buf, 0, n); + bytesLeft -= n; + metrics.inputBytes(n); + reporter.progress(); + } + + LOG.info("Read " + (compressedLength - bytesLeft) + + " bytes from map-output for " + getMapId()); + + disk.close(); + } catch (IOException ioe) { + // Close the streams + IOUtils.cleanup(LOG, input, disk); + + // Re-throw + throw ioe; + } + + // Sanity check + if (bytesLeft != 0) { + throw new IOException("Incomplete map output received for " + + getMapId() + " from " + + host.getHostName() + " (" + + bytesLeft + " bytes missing of " + + compressedLength + ")"); + } + } + + @Override + public void commit() throws IOException { + localFS.rename(tmpOutputPath, outputPath); + merger.closeOnDiskFile(outputPath); + } + + @Override + public void abort() { + try { + localFS.delete(tmpOutputPath, false); + } catch (IOException ie) { + LOG.info("failure to clean up " + tmpOutputPath, ie); + } + } + + @Override + public String getDescription() { + return "DISK"; + } +} diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java index 047e6435ccf..68131659607 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Shuffle.java @@ -21,17 +21,10 @@ import java.io.IOException; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.LocalDirAllocator; -import org.apache.hadoop.io.compress.CompressionCodec; -import org.apache.hadoop.mapred.Counters; import org.apache.hadoop.mapred.JobConf; -import org.apache.hadoop.mapred.MapOutputFile; import org.apache.hadoop.mapred.RawKeyValueIterator; -import org.apache.hadoop.mapred.Reducer; import org.apache.hadoop.mapred.Reporter; import org.apache.hadoop.mapred.Task; -import org.apache.hadoop.mapred.Task.CombineOutputCollector; import org.apache.hadoop.mapred.TaskStatus; import org.apache.hadoop.mapred.TaskUmbilicalProtocol; import org.apache.hadoop.mapred.ShuffleConsumerPlugin; @@ -77,17 +70,21 @@ public class Shuffle implements ShuffleConsumerPlugin, ExceptionRepo this.taskStatus = context.getStatus(); this.reduceTask = context.getReduceTask(); - scheduler = - new ShuffleScheduler(jobConf, taskStatus, this, copyPhase, - context.getShuffledMapsCounter(), - context.getReduceShuffleBytes(), context.getFailedShuffleCounter()); - merger = new MergeManager(reduceId, jobConf, context.getLocalFS(), - context.getLocalDirAllocator(), reporter, context.getCodec(), - context.getCombinerClass(), context.getCombineCollector(), - context.getSpilledRecordsCounter(), - context.getReduceCombineInputCounter(), - context.getMergedMapOutputsCounter(), - this, context.getMergePhase(), context.getMapOutputFile()); + scheduler = new ShuffleScheduler(jobConf, taskStatus, this, + copyPhase, context.getShuffledMapsCounter(), + context.getReduceShuffleBytes(), context.getFailedShuffleCounter()); + merger = createMergeManager(context); + } + + protected MergeManager createMergeManager( + ShuffleConsumerPlugin.Context context) { + return new MergeManagerImpl(reduceId, jobConf, context.getLocalFS(), + context.getLocalDirAllocator(), reporter, context.getCodec(), + context.getCombinerClass(), context.getCombineCollector(), + context.getSpilledRecordsCounter(), + context.getReduceCombineInputCounter(), + context.getMergedMapOutputsCounter(), this, context.getMergePhase(), + context.getMapOutputFile()); } @Override diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index f797a6aa6ff..61d09500003 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 7bb8265629b..c14f282825c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier$Renewer diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml index bf575bc9816..e756860cade 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/resources/mapred-default.xml @@ -857,6 +857,17 @@ + + yarn.app.mapreduce.am.admin.user.env + + Environment variables for the MR App Master + processes for admin purposes. These values are set first and can be + overridden by the user env (yarn.app.mapreduce.am.env). Example: + 1) A=foo This will set the env variable A to foo + 2) B=$B:c This will inherit the app master's B env variable.
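Editor's aside, not part of the patch: the two forms described above (plain assignment and "$VAR" inheritance), together with the admin-before-user ordering, can be illustrated with a small self-contained sketch. The class and helper names below are hypothetical and only approximate what the MapReduce client does with yarn.app.mapreduce.am.admin.user.env and yarn.app.mapreduce.am.env; this is not the actual MRApps code.

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class AmEnvSketch {
      // Applies one comma-separated spec such as "A=foo,B=$B:c" to the map.
      static void apply(Map<String, String> env, String spec) {
        if (spec == null || spec.isEmpty()) {
          return;
        }
        for (String entry : spec.split(",")) {
          String[] kv = entry.split("=", 2);
          String name = kv[0].trim();
          String value = kv.length > 1 ? kv[1] : "";
          // A leading "$NAME" inherits whatever value is already present.
          String ref = "$" + name;
          if (value.startsWith(ref)) {
            String existing = env.containsKey(name) ? env.get(name) : "";
            value = existing + value.substring(ref.length());
          }
          env.put(name, value);
        }
      }

      public static void main(String[] args) {
        Map<String, String> env = new LinkedHashMap<String, String>();
        env.put("B", "/usr/lib");          // pretend B is already set
        apply(env, "A=foo,B=$B:c");        // admin env, applied first
        apply(env, "A=bar");               // user env, overrides A
        System.out.println(env);           // {B=/usr/lib:c, A=bar}
      }
    }

Because the admin spec is applied first, a user entry for the same variable simply overwrites it, which is the override behaviour the description promises.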
+ + + yarn.app.mapreduce.am.command-opts -Xmx1024m diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java index 92bdc74fb31..db4308fc995 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestFetcher.java @@ -53,7 +53,7 @@ public class TestFetcher { private HttpURLConnection connection; public FakeFetcher(JobConf job, TaskAttemptID reduceId, - ShuffleScheduler scheduler, MergeManager merger, Reporter reporter, + ShuffleScheduler scheduler, MergeManagerImpl merger, Reporter reporter, ShuffleClientMetrics metrics, ExceptionReporter exceptionReporter, SecretKey jobTokenSecret, HttpURLConnection connection) { super(job, reduceId, scheduler, merger, reporter, metrics, exceptionReporter, @@ -77,7 +77,7 @@ public class TestFetcher { JobConf job = new JobConf(); TaskAttemptID id = TaskAttemptID.forName("attempt_0_1_r_1_1"); ShuffleScheduler ss = mock(ShuffleScheduler.class); - MergeManager mm = mock(MergeManager.class); + MergeManagerImpl mm = mock(MergeManagerImpl.class); Reporter r = mock(Reporter.class); ShuffleClientMetrics metrics = mock(ShuffleClientMetrics.class); ExceptionReporter except = mock(ExceptionReporter.class); @@ -132,7 +132,7 @@ public class TestFetcher { JobConf job = new JobConf(); TaskAttemptID id = TaskAttemptID.forName("attempt_0_1_r_1_1"); ShuffleScheduler ss = mock(ShuffleScheduler.class); - MergeManager mm = mock(MergeManager.class); + MergeManagerImpl mm = mock(MergeManagerImpl.class); Reporter r = mock(Reporter.class); ShuffleClientMetrics metrics = mock(ShuffleClientMetrics.class); ExceptionReporter except = mock(ExceptionReporter.class); @@ -167,10 +167,9 @@ public class TestFetcher { header.write(new DataOutputStream(bout)); ByteArrayInputStream in = new ByteArrayInputStream(bout.toByteArray()); when(connection.getInputStream()).thenReturn(in); - //Defaults to WAIT, which is what we want to test - MapOutput mapOut = new MapOutput(map1ID); + //Defaults to null, which is what we want to test when(mm.reserve(any(TaskAttemptID.class), anyLong(), anyInt())) - .thenReturn(mapOut); + .thenReturn(null); underTest.copyFromHost(host); diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java index a8669639b2a..46d797c93d3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/java/org/apache/hadoop/mapreduce/task/reduce/TestMergeManager.java @@ -32,13 +32,13 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.MapOutputFile; import org.apache.hadoop.mapreduce.MRJobConfig; -import org.apache.hadoop.mapreduce.task.reduce.MapOutput.Type; import org.junit.Assert; 
import org.junit.Test; public class TestMergeManager { @Test(timeout=10000) + @SuppressWarnings("unchecked") public void testMemoryMerge() throws Exception { final int TOTAL_MEM_BYTES = 10000; final int OUTPUT_SIZE = 7950; @@ -55,45 +55,47 @@ public class TestMergeManager { // reserve enough map output to cause a merge when it is committed MapOutput out1 = mgr.reserve(null, OUTPUT_SIZE, 0); - Assert.assertEquals("Should be a memory merge", - Type.MEMORY, out1.getType()); - fillOutput(out1); + Assert.assertTrue("Should be a memory merge", + (out1 instanceof InMemoryMapOutput)); + InMemoryMapOutput mout1 = (InMemoryMapOutput)out1; + fillOutput(mout1); MapOutput out2 = mgr.reserve(null, OUTPUT_SIZE, 0); - Assert.assertEquals("Should be a memory merge", - Type.MEMORY, out2.getType()); - fillOutput(out2); + Assert.assertTrue("Should be a memory merge", + (out2 instanceof InMemoryMapOutput)); + InMemoryMapOutput mout2 = (InMemoryMapOutput)out2; + fillOutput(mout2); // next reservation should be a WAIT MapOutput out3 = mgr.reserve(null, OUTPUT_SIZE, 0); - Assert.assertEquals("Should be told to wait", - Type.WAIT, out3.getType()); + Assert.assertEquals("Should be told to wait", null, out3); // trigger the first merge and wait for merge thread to start merging // and free enough output to reserve more - out1.commit(); - out2.commit(); + mout1.commit(); + mout2.commit(); mergeStart.await(); Assert.assertEquals(1, mgr.getNumMerges()); // reserve enough map output to cause another merge when committed out1 = mgr.reserve(null, OUTPUT_SIZE, 0); - Assert.assertEquals("Should be a memory merge", - Type.MEMORY, out1.getType()); - fillOutput(out1); + Assert.assertTrue("Should be a memory merge", + (out1 instanceof InMemoryMapOutput)); + mout1 = (InMemoryMapOutput)out1; + fillOutput(mout1); out2 = mgr.reserve(null, OUTPUT_SIZE, 0); - Assert.assertEquals("Should be a memory merge", - Type.MEMORY, out2.getType()); - fillOutput(out2); + Assert.assertTrue("Should be a memory merge", + (out2 instanceof InMemoryMapOutput)); + mout2 = (InMemoryMapOutput)out2; + fillOutput(mout2); - // next reservation should be a WAIT + // next reservation should be null out3 = mgr.reserve(null, OUTPUT_SIZE, 0); - Assert.assertEquals("Should be told to wait", - Type.WAIT, out3.getType()); + Assert.assertEquals("Should be told to wait", null, out3); // commit output *before* merge thread completes - out1.commit(); - out2.commit(); + mout1.commit(); + mout2.commit(); // allow the first merge to complete mergeComplete.await(); @@ -110,7 +112,7 @@ public class TestMergeManager { 0, reporter.getNumExceptions()); } - private void fillOutput(MapOutput output) throws IOException { + private void fillOutput(InMemoryMapOutput output) throws IOException { BoundedByteArrayOutputStream stream = output.getArrayStream(); int count = stream.getLimit(); for (int i=0; i < count; ++i) { @@ -118,7 +120,7 @@ public class TestMergeManager { } } - private static class StubbedMergeManager extends MergeManager { + private static class StubbedMergeManager extends MergeManagerImpl { private TestMergeThread mergeThread; public StubbedMergeManager(JobConf conf, ExceptionReporter reporter, @@ -129,7 +131,7 @@ public class TestMergeManager { } @Override - protected MergeThread, Text, Text> createInMemoryMerger() { + protected MergeThread, Text, Text> createInMemoryMerger() { mergeThread = new TestMergeThread(this, getExceptionReporter()); return mergeThread; } @@ -140,12 +142,12 @@ public class TestMergeManager { } private static class TestMergeThread - extends 
MergeThread, Text, Text> { + extends MergeThread, Text, Text> { private AtomicInteger numMerges; private CyclicBarrier mergeStart; private CyclicBarrier mergeComplete; - public TestMergeThread(MergeManager mergeManager, + public TestMergeThread(MergeManagerImpl mergeManager, ExceptionReporter reporter) { super(mergeManager, Integer.MAX_VALUE, reporter); numMerges = new AtomicInteger(0); @@ -162,11 +164,11 @@ public class TestMergeManager { } @Override - public void merge(List> inputs) + public void merge(List> inputs) throws IOException { synchronized (this) { numMerges.incrementAndGet(); - for (MapOutput input : inputs) { + for (InMemoryMapOutput input : inputs) { manager.unreserve(input.getSize()); } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml index caf65d7efe7..9d63ee9dfed 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml @@ -54,4 +54,20 @@ test + + + + + org.apache.rat + apache-rat-plugin + + + src/test/resources/job_1329348432655_0001_conf.xml + src/test/resources/job_1329348432655_0001-1329348443227-user-Sleep+job-1329348468601-10-1-SUCCEEDED-default.jhist + + + + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java index cfa7e290595..2f3c57d6b8c 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/CompletedJob.java @@ -36,6 +36,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; import org.apache.hadoop.mapred.JobACLsManager; +import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.TaskID; @@ -183,13 +184,13 @@ public class CompletedJob implements org.apache.hadoop.mapreduce.v2.app.job.Job } @Override - public synchronized TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents( + public synchronized TaskCompletionEvent[] getMapAttemptCompletionEvents( int startIndex, int maxEvents) { if (mapCompletionEvents == null) { constructTaskAttemptCompletionEvents(); } - return getAttemptCompletionEvents(mapCompletionEvents, - startIndex, maxEvents); + return TypeConverter.fromYarn(getAttemptCompletionEvents( + mapCompletionEvents, startIndex, maxEvents)); } private static TaskAttemptCompletionEvent[] getAttemptCompletionEvents( diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java index 0bfffac1b07..ce51c390b15 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java +++ 
b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/PartialJob.java @@ -25,6 +25,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.Counters; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; @@ -154,7 +155,7 @@ public class PartialJob implements org.apache.hadoop.mapreduce.v2.app.job.Job { } @Override - public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents( + public TaskCompletionEvent[] getMapAttemptCompletionEvents( int startIndex, int maxEvents) { return null; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java index da983948d10..00b62a79734 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/MockHistoryJobs.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ package org.apache.hadoop.mapreduce.v2.hs; import java.io.IOException; @@ -6,6 +23,7 @@ import java.util.Map; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.Path; +import org.apache.hadoop.mapred.TaskCompletionEvent; import org.apache.hadoop.mapreduce.JobACL; import org.apache.hadoop.mapreduce.v2.api.records.AMInfo; import org.apache.hadoop.mapreduce.v2.api.records.JobId; @@ -126,7 +144,7 @@ public class MockHistoryJobs extends MockJobs { } @Override - public TaskAttemptCompletionEvent[] getMapAttemptCompletionEvents( + public TaskCompletionEvent[] getMapAttemptCompletionEvents( int startIndex, int maxEvents) { return job.getMapAttemptCompletionEvents(startIndex, maxEvents); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java index 1080ebe5325..69b4bd7ac3b 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ package org.apache.hadoop.mapreduce.v2.hs; import static junit.framework.Assert.assertEquals; diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml index 65e5f1038e9..f7b27d4246a 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/pom.xml @@ -148,6 +148,15 @@ + + org.apache.rat + apache-rat-plugin + + + src/test/java/org/apache/hadoop/cli/data60bytes + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java index f2eb71c2e92..a283954efca 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java @@ -33,6 +33,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.CommonConfigurationKeysPublic; import org.apache.hadoop.http.HttpConfig; +import org.apache.hadoop.io.Text; import org.apache.hadoop.mapreduce.JobID; import org.apache.hadoop.mapreduce.JobStatus; import org.apache.hadoop.mapreduce.MRJobConfig; @@ -69,14 +70,16 @@ import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.YarnException; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; import org.apache.hadoop.yarn.ipc.RPCUtil; import org.apache.hadoop.yarn.ipc.YarnRPC; -import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier; +import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier; import org.apache.hadoop.yarn.util.BuilderUtils; +import org.apache.hadoop.yarn.util.ProtoUtils; public class ClientServiceDelegate { private static final Log LOG = LogFactory.getLog(ClientServiceDelegate.class); @@ -176,13 +179,10 @@ public class ClientServiceDelegate { serviceAddr = NetUtils.createSocketAddrForHost( application.getHost(), application.getRpcPort()); if (UserGroupInformation.isSecurityEnabled()) { - String clientTokenEncoded = application.getClientToken(); - Token clientToken = - new Token(); - clientToken.decodeFromUrlString(clientTokenEncoded); - // RPC layer client expects ip:port as service for tokens - SecurityUtil.setTokenService(clientToken, serviceAddr); - newUgi.addToken(clientToken); + ClientToken clientToken = application.getClientToken(); + Token token = + ProtoUtils.convertFromProtoFormat(clientToken, serviceAddr); + newUgi.addToken(token); } LOG.debug("Connecting to " + serviceAddr); final InetSocketAddress finalServiceAddr = serviceAddr; diff --git 
a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java index 7e46433319b..c33ab38c150 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/NotRunningJob.java @@ -62,8 +62,8 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskReport; import org.apache.hadoop.mapreduce.v2.api.records.TaskState; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -86,10 +86,9 @@ public class NotRunningJob implements MRClientProtocol { .newRecordInstance(ApplicationAttemptId.class); // Setting AppState to NEW and finalStatus to UNDEFINED as they are never - // used - // for a non running job + // used for a non running job return BuilderUtils.newApplicationReport(unknownAppId, unknownAttemptId, - "N/A", "N/A", "N/A", "N/A", 0, "", YarnApplicationState.NEW, "N/A", + "N/A", "N/A", "N/A", "N/A", 0, null, YarnApplicationState.NEW, "N/A", "N/A", 0, 0, FinalApplicationStatus.UNDEFINED, null, "N/A"); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java index 427f5a03f88..c401b93c964 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java @@ -101,7 +101,7 @@ public class YARNRunner implements ClientProtocol { private Configuration conf; private final FileContext defaultFileContext; - /* usually is false unless the jobclient getdelegation token is + /* usually is false unless the jobclient get delegation token is * called. This is a hack wherein we do return a token from RM * on getDelegationtoken but due to the restricted api on jobclient * we just add a job history DT token when submitting a job. 
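Editor's aside on the comment above: the behaviour it describes in prose (hand back an RM delegation token only when the client explicitly asks for one, but always attach a job-history delegation token at submission) can be sketched as below. Every name here is illustrative; these are not the actual YARNRunner fields, methods, or token types.

    // Illustrative only: field and method names are hypothetical stand-ins
    // that mirror the behaviour the comment describes.
    class DelegationTokenWorkaroundSketch {
      /** Usually stays false; flipped only if the client asks for a delegation token. */
      private boolean rmDelegationTokenRequested = false;

      String getDelegationToken() {
        rmDelegationTokenRequested = true;
        return fetchTokenFromResourceManager(); // the RM token is handed back to the caller
      }

      void submitJob() {
        // Because the JobClient API is restricted, a job-history delegation token
        // is attached to the job credentials at submission time instead.
        addHistoryServerTokenToCredentials();
      }

      private String fetchTokenFromResourceManager() { return "rm-delegation-token"; }
      private void addHistoryServerTokenToCredentials() { /* no-op in this sketch */ }
    }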
@@ -165,12 +165,12 @@ public class YARNRunner implements ClientProtocol { @Override public TaskTrackerInfo[] getActiveTrackers() throws IOException, InterruptedException { - return resMgrDelegate.getActiveTrackers(); + return resMgrDelegate.getActiveTrackers(); } @Override public JobStatus[] getAllJobs() throws IOException, InterruptedException { - return resMgrDelegate.getAllJobs(); + return resMgrDelegate.getAllJobs(); } @Override @@ -394,14 +394,31 @@ public class YARNRunner implements ClientProtocol { MRJobConfig.MR_AM_LOG_LEVEL, MRJobConfig.DEFAULT_MR_AM_LOG_LEVEL); MRApps.addLog4jSystemProperties(logLevel, logSize, vargs); + // Check for Java Lib Path usage in MAP and REDUCE configs + warnForJavaLibPath(conf.get(MRJobConfig.MAP_JAVA_OPTS,""), "map", + MRJobConfig.MAP_JAVA_OPTS, MRJobConfig.MAP_ENV); + warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS,""), "map", + MRJobConfig.MAPRED_MAP_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV); + warnForJavaLibPath(conf.get(MRJobConfig.REDUCE_JAVA_OPTS,""), "reduce", + MRJobConfig.REDUCE_JAVA_OPTS, MRJobConfig.REDUCE_ENV); + warnForJavaLibPath(conf.get(MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS,""), "reduce", + MRJobConfig.MAPRED_REDUCE_ADMIN_JAVA_OPTS, MRJobConfig.MAPRED_ADMIN_USER_ENV); + // Add AM admin command opts before user command opts // so that it can be overridden by user - vargs.add(conf.get(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, - MRJobConfig.DEFAULT_MR_AM_ADMIN_COMMAND_OPTS)); + String mrAppMasterAdminOptions = conf.get(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, + MRJobConfig.DEFAULT_MR_AM_ADMIN_COMMAND_OPTS); + warnForJavaLibPath(mrAppMasterAdminOptions, "app master", + MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, MRJobConfig.MR_AM_ADMIN_USER_ENV); + vargs.add(mrAppMasterAdminOptions); + + // Add AM user command opts + String mrAppMasterUserOptions = conf.get(MRJobConfig.MR_AM_COMMAND_OPTS, + MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS); + warnForJavaLibPath(mrAppMasterUserOptions, "app master", + MRJobConfig.MR_AM_COMMAND_OPTS, MRJobConfig.MR_AM_ENV); + vargs.add(mrAppMasterUserOptions); - vargs.add(conf.get(MRJobConfig.MR_AM_COMMAND_OPTS, - MRJobConfig.DEFAULT_MR_AM_COMMAND_OPTS)); - vargs.add(MRJobConfig.APPLICATION_MASTER_CLASS); vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + Path.SEPARATOR + ApplicationConstants.STDOUT); @@ -425,6 +442,9 @@ public class YARNRunner implements ClientProtocol { Map environment = new HashMap(); MRApps.setClasspath(environment, conf); + // Setup the environment variables for Admin first + MRApps.setEnvFromInputString(environment, + conf.get(MRJobConfig.MR_AM_ADMIN_USER_ENV)); // Setup the environment variables (LD_LIBRARY_PATH, etc) MRApps.setEnvFromInputString(environment, conf.get(MRJobConfig.MR_AM_ENV)); @@ -582,4 +602,15 @@ public class YARNRunner implements ClientProtocol { throws IOException { return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID); } + + private static void warnForJavaLibPath(String opts, String component, + String javaConf, String envConf) { + if (opts != null && opts.contains("-Djava.library.path")) { + LOG.warn("Usage of -Djava.library.path in " + javaConf + " can cause " + + "programs to no longer function if hadoop native libraries " + + "are used. 
These values should be set as part of the " + + "LD_LIBRARY_PATH in the " + component + " JVM env using " + + envConf + " config settings."); + } + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 10ab75be7d6..4866b2efef2 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.mapreduce.v2.security.client.ClientHSSecurityInfo diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java index 8c1ba60763c..d2ea74e6940 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java @@ -90,8 +90,8 @@ public abstract class NotificationTestCase extends HadoopTestCase { } public static class NotificationServlet extends HttpServlet { - public static int counter = 0; - public static int failureCounter = 0; + public static volatile int counter = 0; + public static volatile int failureCounter = 0; private static final long serialVersionUID = 1L; protected void doGet(HttpServletRequest req, HttpServletResponse res) @@ -155,7 +155,11 @@ public abstract class NotificationTestCase extends HadoopTestCase { System.out.println(launchWordCount(this.createJobConf(), "a b c d e f g h", 1, 1)); - Thread.sleep(2000); + boolean keepTrying = true; + for (int tries = 0; tries < 30 && keepTrying; tries++) { + Thread.sleep(50); + keepTrying = !(NotificationServlet.counter == 2); + } assertEquals(2, NotificationServlet.counter); assertEquals(0, NotificationServlet.failureCounter); @@ -173,14 +177,22 @@ public abstract class NotificationTestCase extends HadoopTestCase { // run a job with KILLED status System.out.println(UtilsForTests.runJobKill(this.createJobConf(), inDir, outDir).getID()); - Thread.sleep(2000); + keepTrying = true; + for (int tries = 0; tries < 30 && keepTrying; tries++) { + Thread.sleep(50); + keepTrying = !(NotificationServlet.counter == 4); + } assertEquals(4, NotificationServlet.counter); assertEquals(0, NotificationServlet.failureCounter); // run a job with 
FAILED status System.out.println(UtilsForTests.runJobFail(this.createJobConf(), inDir, outDir).getID()); - Thread.sleep(2000); + keepTrying = true; + for (int tries = 0; tries < 30 && keepTrying; tries++) { + Thread.sleep(50); + keepTrying = !(NotificationServlet.counter == 6); + } assertEquals(6, NotificationServlet.counter); assertEquals(0, NotificationServlet.failureCounter); } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java index 6a67bbd3a6d..5675742cfd1 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestYARNRunner.java @@ -25,6 +25,7 @@ import static org.mockito.Mockito.spy; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; +import java.io.ByteArrayOutputStream; import java.io.File; import java.io.FileOutputStream; import java.io.IOException; @@ -83,6 +84,11 @@ import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.log4j.Appender; +import org.apache.log4j.Layout; +import org.apache.log4j.Logger; +import org.apache.log4j.SimpleLayout; +import org.apache.log4j.WriterAppender; import org.junit.Before; import org.junit.Test; import org.mockito.invocation.InvocationOnMock; @@ -112,6 +118,7 @@ public class TestYARNRunner extends TestCase { public void setUp() throws Exception { resourceMgrDelegate = mock(ResourceMgrDelegate.class); conf = new YarnConfiguration(); + conf.set(YarnConfiguration.RM_PRINCIPAL, "mapred/host@REALM"); clientCache = new ClientCache(conf, resourceMgrDelegate); clientCache = spy(clientCache); yarnRunner = new YARNRunner(conf, resourceMgrDelegate, clientCache); @@ -188,7 +195,7 @@ public class TestYARNRunner extends TestCase { @Test public void testResourceMgrDelegate() throws Exception { - /* we not want a mock of resourcemgr deleagte */ + /* we not want a mock of resource mgr delegate */ final ClientRMProtocol clientRMProtocol = mock(ClientRMProtocol.class); ResourceMgrDelegate delegate = new ResourceMgrDelegate(conf) { @Override @@ -255,6 +262,9 @@ public class TestYARNRunner extends TestCase { @Test public void testHistoryServerToken() throws Exception { + //Set the master principal in the config + conf.set(YarnConfiguration.RM_PRINCIPAL,"foo@LOCAL"); + final String masterPrincipal = Master.getMasterPrincipal(conf); final MRClientProtocol hsProxy = mock(MRClientProtocol.class); @@ -264,7 +274,7 @@ public class TestYARNRunner extends TestCase { GetDelegationTokenRequest request = (GetDelegationTokenRequest)invocation.getArguments()[0]; // check that the renewer matches the cluster's RM principal - assertEquals(request.getRenewer(), masterPrincipal); + assertEquals(masterPrincipal, request.getRenewer() ); DelegationToken token = recordFactory.newRecordInstance(DelegationToken.class); @@ -356,4 +366,53 @@ public class TestYARNRunner extends TestCase { assertTrue("AM admin command opts is after user command opts.", adminIndex < userIndex); } } + @Test + 
public void testWarnCommandOpts() throws Exception { + Logger logger = Logger.getLogger(YARNRunner.class); + + ByteArrayOutputStream bout = new ByteArrayOutputStream(); + Layout layout = new SimpleLayout(); + Appender appender = new WriterAppender(layout, bout); + logger.addAppender(appender); + + JobConf jobConf = new JobConf(); + + jobConf.set(MRJobConfig.MR_AM_ADMIN_COMMAND_OPTS, "-Djava.net.preferIPv4Stack=true -Djava.library.path=foo"); + jobConf.set(MRJobConfig.MR_AM_COMMAND_OPTS, "-Xmx1024m -Djava.library.path=bar"); + + YARNRunner yarnRunner = new YARNRunner(jobConf); + + File jobxml = new File(testWorkDir, MRJobConfig.JOB_CONF_FILE); + OutputStream out = new FileOutputStream(jobxml); + conf.writeXml(out); + out.close(); + + File jobsplit = new File(testWorkDir, MRJobConfig.JOB_SPLIT); + out = new FileOutputStream(jobsplit); + out.close(); + + File jobsplitmetainfo = new File(testWorkDir, MRJobConfig.JOB_SPLIT_METAINFO); + out = new FileOutputStream(jobsplitmetainfo); + out.close(); + + File appTokens = new File(testWorkDir, MRJobConfig.APPLICATION_TOKENS_FILE); + out = new FileOutputStream(appTokens); + out.close(); + + @SuppressWarnings("unused") + ApplicationSubmissionContext submissionContext = + yarnRunner.createApplicationSubmissionContext(jobConf, testWorkDir.toString(), new Credentials()); + + String logMsg = bout.toString(); + assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.admin-command-opts can cause programs to no " + + "longer function if hadoop native libraries are used. These values " + + "should be set as part of the LD_LIBRARY_PATH in the app master JVM " + + "env using yarn.app.mapreduce.am.admin.user.env config settings.")); + assertTrue(logMsg.contains("WARN - Usage of -Djava.library.path in " + + "yarn.app.mapreduce.am.command-opts can cause programs to no longer " + + "function if hadoop native libraries are used. 
These values should " + + "be set as part of the LD_LIBRARY_PATH in the app master JVM env " + + "using yarn.app.mapreduce.am.env config settings.")); + } } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml index 5b2231e0dbd..199791eedbf 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/pom.xml @@ -134,7 +134,15 @@ Max - + + org.apache.rat + apache-rat-plugin + + + src/main/java/org/apache/hadoop/examples/dancing/puzzle1.dta + + + diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java index 08d7cfad2dc..8ddd64fd1f3 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/QuasiMonteCarlo.java @@ -21,6 +21,7 @@ package org.apache.hadoop.examples; import java.io.IOException; import java.math.BigDecimal; import java.math.RoundingMode; +import java.util.Random; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.conf.Configured; @@ -77,8 +78,7 @@ public class QuasiMonteCarlo extends Configured implements Tool { static final String DESCRIPTION = "A map/reduce program that estimates Pi using a quasi-Monte Carlo method."; /** tmp directory for input/output */ - static private final Path TMP_DIR = new Path( - QuasiMonteCarlo.class.getSimpleName() + "_TMP_3_141592654"); + static private final String TMP_DIR_PREFIX = QuasiMonteCarlo.class.getSimpleName(); /** 2-dimensional Halton sequence {H(i)}, * where H(i) is a 2-dimensional point and i >= 1 is the index. @@ -228,9 +228,9 @@ public class QuasiMonteCarlo extends Configured implements Tool { @Override public void cleanup(Context context) throws IOException { //write output to a file - Path outDir = new Path(TMP_DIR, "out"); - Path outFile = new Path(outDir, "reduce-out"); Configuration conf = context.getConfiguration(); + Path outDir = new Path(conf.get(FileOutputFormat.OUTDIR)); + Path outFile = new Path(outDir, "reduce-out"); FileSystem fileSys = FileSystem.get(conf); SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, outFile, LongWritable.class, LongWritable.class, @@ -246,7 +246,7 @@ public class QuasiMonteCarlo extends Configured implements Tool { * @return the estimated value of Pi */ public static BigDecimal estimatePi(int numMaps, long numPoints, - Configuration conf + Path tmpDir, Configuration conf ) throws IOException, ClassNotFoundException, InterruptedException { Job job = new Job(conf); //setup job conf @@ -269,14 +269,14 @@ public class QuasiMonteCarlo extends Configured implements Tool { job.setSpeculativeExecution(false); //setup input/output directories - final Path inDir = new Path(TMP_DIR, "in"); - final Path outDir = new Path(TMP_DIR, "out"); + final Path inDir = new Path(tmpDir, "in"); + final Path outDir = new Path(tmpDir, "out"); FileInputFormat.setInputPaths(job, inDir); FileOutputFormat.setOutputPath(job, outDir); final FileSystem fs = FileSystem.get(conf); - if (fs.exists(TMP_DIR)) { - throw new IOException("Tmp directory " + fs.makeQualified(TMP_DIR) + if (fs.exists(tmpDir)) { + throw new IOException("Tmp directory " + fs.makeQualified(tmpDir) + " already exists. 
Please remove it first."); } if (!fs.mkdirs(inDir)) { @@ -325,7 +325,7 @@ public class QuasiMonteCarlo extends Configured implements Tool { .multiply(BigDecimal.valueOf(numInside.get())) .divide(numTotal, RoundingMode.HALF_UP); } finally { - fs.delete(TMP_DIR, true); + fs.delete(tmpDir, true); } } @@ -344,12 +344,15 @@ public class QuasiMonteCarlo extends Configured implements Tool { final int nMaps = Integer.parseInt(args[0]); final long nSamples = Long.parseLong(args[1]); + long now = System.currentTimeMillis(); + int rand = new Random().nextInt(Integer.MAX_VALUE); + final Path tmpDir = new Path(TMP_DIR_PREFIX + "_" + now + "_" + rand); System.out.println("Number of Maps = " + nMaps); System.out.println("Samples per Map = " + nSamples); System.out.println("Estimated value of Pi is " - + estimatePi(nMaps, nSamples, getConf())); + + estimatePi(nMaps, nSamples, tmpDir, getConf())); return 0; } diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java index 94c647711d9..e97d9c3c424 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/dancing/DistributedPentomino.java @@ -174,16 +174,16 @@ public class DistributedPentomino extends Configured implements Tool { return 2; } // check for passed parameters, otherwise use defaults - int width = PENT_WIDTH; - int height = PENT_HEIGHT; - int depth = PENT_DEPTH; + int width = conf.getInt(Pentomino.WIDTH, PENT_WIDTH); + int height = conf.getInt(Pentomino.HEIGHT, PENT_HEIGHT); + int depth = conf.getInt(Pentomino.DEPTH, PENT_DEPTH); for (int i = 0; i < args.length; i++) { if (args[i].equalsIgnoreCase("-depth")) { - depth = Integer.parseInt(args[i++].trim()); + depth = Integer.parseInt(args[++i].trim()); } else if (args[i].equalsIgnoreCase("-height")) { - height = Integer.parseInt(args[i++].trim()); + height = Integer.parseInt(args[++i].trim()); } else if (args[i].equalsIgnoreCase("-width") ) { - width = Integer.parseInt(args[i++].trim()); + width = Integer.parseInt(args[++i].trim()); } } // now set the values within conf for M/R tasks to read, this diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java index 54575165746..56b358ef570 100644 --- a/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java +++ b/hadoop-mapreduce-project/hadoop-mapreduce-examples/src/test/java/org/apache/hadoop/examples/TestWordStats.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. 
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.examples; import static org.junit.Assert.assertEquals; diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml index 294441d4ade..f06ca48cb56 100644 --- a/hadoop-mapreduce-project/pom.xml +++ b/hadoop-mapreduce-project/pom.xml @@ -214,9 +214,11 @@ org.apache.rat apache-rat-plugin - - pom.xml - + + .eclipse.templates/ + CHANGES.txt + lib/jdiff/** + diff --git a/hadoop-maven-plugins/pom.xml b/hadoop-maven-plugins/pom.xml new file mode 100644 index 00000000000..97998452dd8 --- /dev/null +++ b/hadoop-maven-plugins/pom.xml @@ -0,0 +1,76 @@ + + + + 4.0.0 + + org.apache.hadoop + hadoop-project + 3.0.0-SNAPSHOT + ../hadoop-project + + org.apache.hadoop + hadoop-maven-plugins + maven-plugin + Apache Hadoop Maven Plugins + + 3.0 + + + + org.apache.maven + maven-plugin-api + ${maven.dependency.version} + + + org.apache.maven + maven-core + ${maven.dependency.version} + + + org.apache.maven.plugin-tools + maven-plugin-annotations + ${maven.dependency.version} + provided + + + junit + junit + 3.8.1 + test + + + + + + org.apache.maven.plugins + maven-plugin-plugin + ${maven.dependency.version} + + true + + + + mojo-descriptor + + descriptor + + + + + + + diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java new file mode 100644 index 00000000000..144ee135623 --- /dev/null +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java @@ -0,0 +1,118 @@ +/* + * Copyright 2012 The Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.maven.plugin.util; + +import org.apache.maven.plugin.Mojo; + +import java.io.BufferedReader; +import java.io.IOException; +import java.io.InputStream; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.List; + +/** + * Exec is a helper class for executing an external process from a mojo. + */ +public class Exec { + private Mojo mojo; + + /** + * Creates a new Exec instance for executing an external process from the given + * mojo. + * + * @param mojo Mojo executing external process + */ + public Exec(Mojo mojo) { + this.mojo = mojo; + } + + /** + * Runs the specified command and saves each line of the command's output to + * the given list. 
+ * + * @param command List containing command and all arguments + * @param output List in/out parameter to receive command output + * @return int exit code of command + */ + public int run(List command, List output) { + int retCode = 1; + ProcessBuilder pb = new ProcessBuilder(command); + try { + Process p = pb.start(); + OutputBufferThread stdOut = new OutputBufferThread(p.getInputStream()); + OutputBufferThread stdErr = new OutputBufferThread(p.getErrorStream()); + stdOut.start(); + stdErr.start(); + retCode = p.waitFor(); + if (retCode != 0) { + mojo.getLog().warn(command + " failed with error code " + retCode); + for (String s : stdErr.getOutput()) { + mojo.getLog().debug(s); + } + } else { + stdOut.join(); + stdErr.join(); + output.addAll(stdOut.getOutput()); + } + } catch (Exception ex) { + mojo.getLog().warn(command + " failed: " + ex.toString()); + } + return retCode; + } + + /** + * OutputBufferThread is a background thread for consuming and storing output + * of the external process. + */ + private static class OutputBufferThread extends Thread { + private List output; + private BufferedReader reader; + + /** + * Creates a new OutputBufferThread to consume the given InputStream. + * + * @param is InputStream to consume + */ + public OutputBufferThread(InputStream is) { + this.setDaemon(true); + output = new ArrayList(); + reader = new BufferedReader(new InputStreamReader(is)); + } + + @Override + public void run() { + try { + String line = reader.readLine(); + while (line != null) { + output.add(line); + line = reader.readLine(); + } + } catch (IOException ex) { + throw new RuntimeException("make failed with error code " + ex.toString()); + } + } + + /** + * Returns every line consumed from the input. + * + * @return List every line consumed from the input + */ + public List getOutput() { + return output; + } + } +} diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java new file mode 100644 index 00000000000..73f2ca6ddbd --- /dev/null +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/FileSetUtils.java @@ -0,0 +1,61 @@ +/* + * Copyright 2012 The Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.maven.plugin.util; + +import org.apache.maven.model.FileSet; +import org.codehaus.plexus.util.FileUtils; + +import java.io.File; +import java.io.IOException; +import java.util.List; + +/** + * FileSetUtils contains helper methods for mojo implementations that need to + * work with a Maven FileSet. + */ +public class FileSetUtils { + + /** + * Returns a string containing every element of the given list, with each + * element separated by a comma. 
+ * + * @param list List of all elements + * @return String containing every element, comma-separated + */ + private static String getCommaSeparatedList(List list) { + StringBuilder buffer = new StringBuilder(); + String separator = ""; + for (Object e : list) { + buffer.append(separator).append(e); + separator = ","; + } + return buffer.toString(); + } + + /** + * Converts a Maven FileSet to a list of File objects. + * + * @param source FileSet to convert + * @return List containing every element of the FileSet as a File + * @throws IOException if an I/O error occurs while trying to find the files + */ + @SuppressWarnings("unchecked") + public static List convertFileSetToFiles(FileSet source) throws IOException { + String includes = getCommaSeparatedList(source.getIncludes()); + String excludes = getCommaSeparatedList(source.getExcludes()); + return FileUtils.getFiles(new File(source.getDirectory()), includes, excludes); + } +} diff --git a/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java new file mode 100644 index 00000000000..b489c0a7c0d --- /dev/null +++ b/hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/versioninfo/VersionInfoMojo.java @@ -0,0 +1,343 @@ +/* + * Copyright 2012 The Apache Software Foundation. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.maven.plugin.versioninfo; + +import org.apache.hadoop.maven.plugin.util.Exec; +import org.apache.hadoop.maven.plugin.util.FileSetUtils; +import org.apache.maven.model.FileSet; +import org.apache.maven.plugin.AbstractMojo; +import org.apache.maven.plugin.MojoExecutionException; +import org.apache.maven.plugins.annotations.LifecyclePhase; +import org.apache.maven.plugins.annotations.Mojo; +import org.apache.maven.plugins.annotations.Parameter; +import org.apache.maven.project.MavenProject; + +import java.io.File; +import java.io.IOException; +import java.io.RandomAccessFile; +import java.security.MessageDigest; +import java.security.NoSuchAlgorithmException; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Comparator; +import java.util.Date; +import java.util.List; +import java.util.TimeZone; + +/** + * VersionInfoMojo calculates information about the current version of the + * codebase and exports the information as properties for further use in a Maven + * build. The version information includes build time, SCM URI, SCM branch, SCM + * commit, and an MD5 checksum of the contents of the files in the codebase. 
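+ * <p>
+ * As a sketch of how the exported values could be consumed (the property names
+ * are the defaults declared below; the consuming code is hypothetical, not part
+ * of this patch):
+ * <pre>
+ *   Properties props = project.getProperties();       // populated by execute()
+ *   String buildTime = props.getProperty("version-info.build.time");
+ *   String commit    = props.getProperty("version-info.scm.commit");
+ *   String sourceMd5 = props.getProperty("version-info.source.md5");
+ * </pre>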
+ */ +@Mojo(name="version-info", defaultPhase=LifecyclePhase.INITIALIZE) +public class VersionInfoMojo extends AbstractMojo { + + @Parameter(defaultValue="${project}") + private MavenProject project; + + @Parameter(required=true) + private FileSet source; + + @Parameter(defaultValue="version-info.build.time") + private String buildTimeProperty; + + @Parameter(defaultValue="version-info.source.md5") + private String md5Property; + + @Parameter(defaultValue="version-info.scm.uri") + private String scmUriProperty; + + @Parameter(defaultValue="version-info.scm.branch") + private String scmBranchProperty; + + @Parameter(defaultValue="version-info.scm.commit") + private String scmCommitProperty; + + @Parameter(defaultValue="git") + private String gitCommand; + + @Parameter(defaultValue="svn") + private String svnCommand; + + private enum SCM {NONE, SVN, GIT} + + @Override + public void execute() throws MojoExecutionException { + try { + SCM scm = determineSCM(); + project.getProperties().setProperty(buildTimeProperty, getBuildTime()); + project.getProperties().setProperty(scmUriProperty, getSCMUri(scm)); + project.getProperties().setProperty(scmBranchProperty, getSCMBranch(scm)); + project.getProperties().setProperty(scmCommitProperty, getSCMCommit(scm)); + project.getProperties().setProperty(md5Property, computeMD5()); + } catch (Throwable ex) { + throw new MojoExecutionException(ex.toString(), ex); + } + } + + /** + * Returns a string representing current build time. + * + * @return String representing current build time + */ + private String getBuildTime() { + DateFormat dateFormat = new SimpleDateFormat("yyyy-MM-dd'T'HH:mm'Z'"); + dateFormat.setTimeZone(TimeZone.getTimeZone("UTC")); + return dateFormat.format(new Date()); + } + private List scmOut; + + /** + * Determines which SCM is in use (Subversion, git, or none) and captures + * output of the SCM command for later parsing. + * + * @return SCM in use for this build + * @throws Exception if any error occurs attempting to determine SCM + */ + private SCM determineSCM() throws Exception { + Exec exec = new Exec(this); + SCM scm = SCM.NONE; + scmOut = new ArrayList(); + int ret = exec.run(Arrays.asList(svnCommand, "info"), scmOut); + if (ret == 0) { + scm = SCM.SVN; + } else { + ret = exec.run(Arrays.asList(gitCommand, "branch"), scmOut); + if (ret == 0) { + ret = exec.run(Arrays.asList(gitCommand, "remote", "-v"), scmOut); + if (ret != 0) { + scm = SCM.NONE; + scmOut = null; + } else { + ret = exec.run(Arrays.asList(gitCommand, "log", "-n", "1"), scmOut); + if (ret != 0) { + scm = SCM.NONE; + scmOut = null; + } else { + scm = SCM.GIT; + } + } + } + } + if (scmOut != null) { + getLog().debug(scmOut.toString()); + } + getLog().info("SCM: " + scm); + return scm; + } + + /** + * Return URI and branch of Subversion repository. 
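+ * <p>
+ * A hedged example of how the parsing below behaves (the URLs are hypothetical):
+ * <pre>
+ *   getSvnUriInfo("http://svn.apache.org/repos/asf/hadoop/common/branches/branch-2")
+ *   // returns { "http://svn.apache.org/repos/asf/hadoop/common", "branch-2" }
+ *   getSvnUriInfo("http://svn.apache.org/repos/asf/hadoop/common/trunk")
+ *   // returns { "http://svn.apache.org/repos/asf/hadoop/common", "trunk" }
+ * </pre>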
+ * + * @param str String Subversion info output containing URI and branch + * @return String[] containing URI and branch + */ + private String[] getSvnUriInfo(String str) { + String[] res = new String[]{"Unknown", "Unknown"}; + try { + String path = str; + int index = path.indexOf("trunk"); + if (index > -1) { + res[0] = path.substring(0, index - 1); + res[1] = "trunk"; + } else { + index = path.indexOf("branches"); + if (index > -1) { + res[0] = path.substring(0, index - 1); + int branchIndex = index + "branches".length() + 1; + index = path.indexOf("/", branchIndex); + if (index > -1) { + res[1] = path.substring(branchIndex, index); + } else { + res[1] = path.substring(branchIndex); + } + } + } + } catch (Exception ex) { + getLog().warn("Could not determine URI & branch from SVN URI: " + str); + } + return res; + } + + /** + * Parses SCM output and returns URI of SCM. + * + * @param scm SCM in use for this build + * @return String URI of SCM + */ + private String getSCMUri(SCM scm) { + String uri = "Unknown"; + switch (scm) { + case SVN: + for (String s : scmOut) { + if (s.startsWith("URL:")) { + uri = s.substring(4).trim(); + uri = getSvnUriInfo(uri)[0]; + break; + } + } + break; + case GIT: + for (String s : scmOut) { + if (s.startsWith("origin") && s.endsWith("(fetch)")) { + uri = s.substring("origin".length()); + uri = uri.substring(0, uri.length() - "(fetch)".length()); + break; + } + } + break; + } + return uri.trim(); + } + + /** + * Parses SCM output and returns commit of SCM. + * + * @param scm SCM in use for this build + * @return String commit of SCM + */ + private String getSCMCommit(SCM scm) { + String commit = "Unknown"; + switch (scm) { + case SVN: + for (String s : scmOut) { + if (s.startsWith("Revision:")) { + commit = s.substring("Revision:".length()); + break; + } + } + break; + case GIT: + for (String s : scmOut) { + if (s.startsWith("commit")) { + commit = s.substring("commit".length()); + break; + } + } + break; + } + return commit.trim(); + } + + /** + * Parses SCM output and returns branch of SCM. + * + * @param scm SCM in use for this build + * @return String branch of SCM + */ + private String getSCMBranch(SCM scm) { + String branch = "Unknown"; + switch (scm) { + case SVN: + for (String s : scmOut) { + if (s.startsWith("URL:")) { + branch = s.substring(4).trim(); + branch = getSvnUriInfo(branch)[1]; + break; + } + } + break; + case GIT: + for (String s : scmOut) { + if (s.startsWith("*")) { + branch = s.substring("*".length()); + break; + } + } + break; + } + return branch.trim(); + } + + /** + * Reads and returns the full contents of the specified file. + * + * @param file File to read + * @return byte[] containing full contents of file + * @throws IOException if there is an I/O error while reading the file + */ + private byte[] readFile(File file) throws IOException { + RandomAccessFile raf = new RandomAccessFile(file, "r"); + byte[] buffer = new byte[(int) raf.length()]; + raf.readFully(buffer); + raf.close(); + return buffer; + } + + /** + * Given a list of files, computes and returns an MD5 checksum of the full + * contents of all files. 
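+ * <p>
+ * A minimal sketch of the digest pattern used below (the file names are
+ * hypothetical):
+ * <pre>
+ *   MessageDigest md5 = MessageDigest.getInstance("MD5");
+ *   md5.update(readFile(new File("a.txt")));
+ *   md5.update(readFile(new File("b.txt")));
+ *   byte[] digest = md5.digest();   // one digest over the concatenated contents
+ * </pre>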
+ * + * @param files List containing every file to input into the MD5 checksum + * @return byte[] calculated MD5 checksum + * @throws IOException if there is an I/O error while reading a file + * @throws NoSuchAlgorithmException if the MD5 algorithm is not supported + */ + private byte[] computeMD5(List files) throws IOException, NoSuchAlgorithmException { + MessageDigest md5 = MessageDigest.getInstance("MD5"); + for (File file : files) { + getLog().debug("Computing MD5 for: " + file); + md5.update(readFile(file)); + } + return md5.digest(); + } + + /** + * Converts bytes to a hexadecimal string representation and returns it. + * + * @param array byte[] to convert + * @return String containing hexadecimal representation of bytes + */ + private String byteArrayToString(byte[] array) { + StringBuilder sb = new StringBuilder(); + for (byte b : array) { + sb.append(Integer.toHexString(0xff & b)); + } + return sb.toString(); + } + + /** + * Computes and returns an MD5 checksum of the contents of all files in the + * input Maven FileSet. + * + * @return String containing hexadecimal representation of MD5 checksum + * @throws Exception if there is any error while computing the MD5 checksum + */ + private String computeMD5() throws Exception { + List files = FileSetUtils.convertFileSetToFiles(source); + // File order of MD5 calculation is significant. Sorting is done on + // unix-format names, case-folded, in order to get a platform-independent + // sort and calculate the same MD5 on all platforms. + Collections.sort(files, new Comparator() { + @Override + public int compare(File lhs, File rhs) { + return normalizePath(lhs).compareTo(normalizePath(rhs)); + } + + private String normalizePath(File file) { + return file.getPath().toUpperCase().replaceAll("\\\\", "/"); + } + }); + byte[] md5 = computeMD5(files); + String md5str = byteArrayToString(md5); + getLog().info("Computed MD5: " + md5str); + return md5str; + } +} diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml index 4b833c5b37c..3e3bca212b6 100644 --- a/hadoop-project/pom.xml +++ b/hadoop-project/pom.xml @@ -769,6 +769,11 @@ maven-pdf-plugin 1.1 + + org.apache.hadoop + hadoop-maven-plugins + ${project.version} + diff --git a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml index 0d936c9f0c1..a5c44431810 100644 --- a/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml +++ b/hadoop-tools/hadoop-distcp/src/main/resources/distcp-default.xml @@ -1,5 +1,20 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml index 84662c076c7..49e8e3a0759 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/appendix.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml index 18c49259ae7..fd536c7290b 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/architecture.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml index e4eccd54878..f35038f85a5 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/cli.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml 
b/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml index 27108a7dcef..62e48fce8b5 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/index.xml @@ -1,4 +1,19 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml b/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml index a72ec05418e..208b0b7df94 100644 --- a/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml +++ b/hadoop-tools/hadoop-distcp/src/site/xdoc/usage.xml @@ -1,3 +1,18 @@ + diff --git a/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml b/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml index 016edf27ed1..64485f11f87 100644 --- a/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml +++ b/hadoop-tools/hadoop-distcp/src/test/resources/sslConfig.xml @@ -1,5 +1,20 @@ + diff --git a/hadoop-tools/hadoop-pipes/pom.xml b/hadoop-tools/hadoop-pipes/pom.xml index 70875f26c42..cf3a1d367b3 100644 --- a/hadoop-tools/hadoop-pipes/pom.xml +++ b/hadoop-tools/hadoop-pipes/pom.xml @@ -57,6 +57,9 @@ + + diff --git a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml index b552a1ccd76..5425de205bb 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml +++ b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word-part.xml @@ -1,4 +1,21 @@ + + diff --git a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml index ed727ddf333..9d1cd572dc3 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml +++ b/hadoop-tools/hadoop-pipes/src/main/native/examples/conf/word.xml @@ -1,4 +1,21 @@ + + diff --git a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt index 6cfd4d6d121..906522c73c3 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt +++ b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-gdb-commands.txt @@ -1,3 +1,14 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. info threads backtrace quit diff --git a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script index e7f59e5f6f5..6bacc437e43 100644 --- a/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script +++ b/hadoop-tools/hadoop-pipes/src/main/native/pipes/debug/pipes-default-script @@ -1,3 +1,14 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. core=`find . -name 'core*'` #Only pipes programs have 5th argument as program name. gdb -quiet $5 -c $core -x $HADOOP_PREFIX/src/c++/pipes/debug/pipes-default-gdb-commands.txt diff --git a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java index f160fcfab77..10b8d84852e 100644 --- a/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java +++ b/hadoop-tools/hadoop-rumen/src/main/java/org/apache/hadoop/tools/rumen/anonymization/WordList.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.tools.rumen.anonymization; import java.util.HashMap; diff --git a/hadoop-tools/hadoop-tools-dist/pom.xml b/hadoop-tools/hadoop-tools-dist/pom.xml index 951eab139c5..cbd0bb545a2 100644 --- a/hadoop-tools/hadoop-tools-dist/pom.xml +++ b/hadoop-tools/hadoop-tools-dist/pom.xml @@ -91,9 +91,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-tools/pom.xml b/hadoop-tools/pom.xml index bc75e2b6536..f6fada59ac1 100644 --- a/hadoop-tools/pom.xml +++ b/hadoop-tools/pom.xml @@ -54,9 +54,6 @@ org.apache.rat apache-rat-plugin - - pom.xml - diff --git a/hadoop-yarn-project/CHANGES.txt b/hadoop-yarn-project/CHANGES.txt index 2f071b8e7ee..5ba5a050b25 100644 --- a/hadoop-yarn-project/CHANGES.txt +++ b/hadoop-yarn-project/CHANGES.txt @@ -35,6 +35,11 @@ Release 2.0.3-alpha - Unreleased YARN-2. Enhanced CapacityScheduler to account for CPU alongwith memory for multi-dimensional resource scheduling. (acmurthy) + YARN-328. Use token request messages defined in hadoop common. (suresh) + + YARN-231. RM Restart - Add FS-based persistent store implementation for + RMStateStore (Bikas Saha via hitesh) + IMPROVEMENTS YARN-223. Update process tree instead of getting new process trees. @@ -97,6 +102,11 @@ Release 2.0.3-alpha - Unreleased YARN-170. Change NodeManager stop to be reentrant. (Sandy Ryza via vinodkv) + YARN-331. Fill in missing fair scheduler documentation. (sandyr via tucu) + + YARN-277. Use AMRMClient in DistributedShell to exemplify the approach. + (Bikas Saha via hitesh) + OPTIMIZATIONS BUG FIXES @@ -185,6 +195,24 @@ Release 2.0.3-alpha - Unreleased YARN-253. 
Fixed container-launch to not fail when there are no local resources to localize. (Tom White via vinodkv) + YARN-330. Fix flakey test: TestNodeManagerShutdown#testKillContainersOnShutdown. + (Sandy Ryza via hitesh) + + YARN-335. Fair scheduler doesn't check whether rack needs containers + before assigning to node. (Sandy Ryza via tomwhite) + + YARN-336. Fair scheduler FIFO scheduling within a queue only allows 1 + app at a time. (Sandy Ryza via tomwhite) + + YARN-135. Client tokens should be per app-attempt, and should be + unregistered on App-finish. (vinodkv via sseth) + + YARN-302. Fair scheduler assignmultiple should default to false. (sandyr via tucu) + + YARN-319. Submitting a job to a fair scheduler queue for which the user + does not have permission causes the client to wait forever. + (shenhong via tomwhite) + Release 2.0.2-alpha - 2012-09-07 INCOMPATIBLE CHANGES @@ -238,6 +266,18 @@ Release 2.0.2-alpha - 2012-09-07 YARN-138. Ensure default values for minimum/maximum container sizes is sane. (harsh & sseth via acmurthy) +Release 0.23.7 - UNRELEASED + + INCOMPATIBLE CHANGES + + NEW FEATURES + + IMPROVEMENTS + + OPTIMIZATIONS + + BUG FIXES + Release 0.23.6 - UNRELEASED INCOMPATIBLE CHANGES @@ -293,7 +333,12 @@ Release 0.23.6 - UNRELEASED YARN-325. RM CapacityScheduler can deadlock when getQueueInfo() is called and a container is completing (Arun C Murthy via tgraves) -Release 0.23.5 - UNRELEASED + YARN-334. Maven RAT plugin is not checking all source files (tgraves) + + YARN-354. WebAppProxyServer exits immediately after startup (Liang Xie via + jlowe) + +Release 0.23.5 - 2012-11-28 INCOMPATIBLE CHANGES diff --git a/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml b/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml index d14deea6cc5..25292c75e3e 100644 --- a/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml +++ b/hadoop-yarn-project/hadoop-yarn/conf/yarn-site.xml @@ -1,4 +1,17 @@ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java index 95dcd59d45b..a38f94a7ede 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenRequestPBImpl.java @@ -17,13 +17,13 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.yarn.api.records.impl.pb.DelegationTokenPBImpl; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.CancelDelegationTokenRequestProtoOrBuilder; public class CancelDelegationTokenRequestPBImpl extends ProtoBase implements @@ -52,10 +52,7 @@ public class CancelDelegationTokenRequestPBImpl extends if 
(this.token != null) { return this.token; } - if (!p.hasDelegationToken()) { - return null; - } - this.token = convertFromProtoFormat(p.getDelegationToken()); + this.token = convertFromProtoFormat(p.getToken()); return this.token; } @@ -63,7 +60,7 @@ public class CancelDelegationTokenRequestPBImpl extends public void setDelegationToken(DelegationToken token) { maybeInitBuilder(); if (token == null) - builder.clearDelegationToken(); + builder.clearToken(); this.token = token; } @@ -77,7 +74,7 @@ public class CancelDelegationTokenRequestPBImpl extends private void mergeLocalToBuilder() { if (token != null) { - builder.setDelegationToken(convertToProtoFormat(this.token)); + builder.setToken(convertToProtoFormat(this.token)); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java index 14908c7bbcc..5eb2b0713d8 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/CancelDelegationTokenResponsePBImpl.java @@ -17,9 +17,9 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; import org.apache.hadoop.yarn.api.records.ProtoBase; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.CancelDelegationTokenResponseProto; public class CancelDelegationTokenResponsePBImpl extends ProtoBase implements diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java index 57594446018..687d469b76c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenRequestPBImpl.java @@ -17,10 +17,10 @@ */ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.ProtoBase; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenRequestProtoOrBuilder; public class GetDelegationTokenRequestPBImpl extends ProtoBase implements GetDelegationTokenRequest { @@ -48,9 +48,6 @@ public class GetDelegationTokenRequestPBImpl extends if (this.renewer != null) { return this.renewer; } - if (!p.hasRenewer()) { - return null; - } this.renewer = p.getRenewer(); return this.renewer; } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java index b459e2328d1..76c31b40c8b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/GetDelegationTokenResponsePBImpl.java @@ -18,13 +18,13 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProtoOrBuilder; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.protocolrecords.GetDelegationTokenResponse; import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.yarn.api.records.impl.pb.DelegationTokenPBImpl; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenResponseProtoOrBuilder; public class GetDelegationTokenResponsePBImpl extends ProtoBase implements GetDelegationTokenResponse { @@ -53,10 +53,10 @@ ProtoBase implements GetDelegationTokenResponse if (this.appToken != null) { return this.appToken; } - if (!p.hasApplicationToken()) { + if (!p.hasToken()) { return null; } - this.appToken = convertFromProtoFormat(p.getApplicationToken()); + this.appToken = convertFromProtoFormat(p.getToken()); return this.appToken; } @@ -64,7 +64,7 @@ ProtoBase implements GetDelegationTokenResponse public void setRMDelegationToken(DelegationToken appToken) { maybeInitBuilder(); if (appToken == null) - builder.clearApplicationToken(); + builder.clearToken(); this.appToken = appToken; } @@ -79,7 +79,7 @@ ProtoBase implements GetDelegationTokenResponse private void mergeLocalToBuilder() { if (appToken != null) { - builder.setApplicationToken(convertToProtoFormat(this.appToken)); + builder.setToken(convertToProtoFormat(this.appToken)); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java index ec927b15b6b..cf02e849085 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenRequestPBImpl.java @@ -17,13 +17,13 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProtoOrBuilder; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest; import org.apache.hadoop.yarn.api.records.DelegationToken; import 
org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.yarn.api.records.impl.pb.DelegationTokenPBImpl; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RenewDelegationTokenRequestProtoOrBuilder; public class RenewDelegationTokenRequestPBImpl extends ProtoBase implements @@ -51,10 +51,7 @@ public class RenewDelegationTokenRequestPBImpl extends if (this.token != null) { return this.token; } - if (!p.hasDelegationToken()) { - return null; - } - this.token = convertFromProtoFormat(p.getDelegationToken()); + this.token = convertFromProtoFormat(p.getToken()); return this.token; } @@ -62,7 +59,7 @@ public class RenewDelegationTokenRequestPBImpl extends public void setDelegationToken(DelegationToken token) { maybeInitBuilder(); if (token == null) - builder.clearDelegationToken(); + builder.clearToken(); this.token = token; } @@ -77,7 +74,7 @@ public class RenewDelegationTokenRequestPBImpl extends private void mergeLocalToBuilder() { if (token != null) { - builder.setDelegationToken(convertToProtoFormat(this.token)); + builder.setToken(convertToProtoFormat(this.token)); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java index b5c80f199c9..ae1acc7ce1d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/protocolrecords/impl/pb/RenewDelegationTokenResponsePBImpl.java @@ -17,10 +17,10 @@ package org.apache.hadoop.yarn.api.protocolrecords.impl.pb; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProtoOrBuilder; import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenResponse; import org.apache.hadoop.yarn.api.records.ProtoBase; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RenewDelegationTokenResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RenewDelegationTokenResponseProtoOrBuilder; public class RenewDelegationTokenResponsePBImpl extends ProtoBase implements @@ -58,12 +58,12 @@ public class RenewDelegationTokenResponsePBImpl extends @Override public long getNextExpirationTime() { RenewDelegationTokenResponseProtoOrBuilder p = viaProto ? 
proto : builder; - return p.getNextExpiryTs(); + return p.getNewExpiryTime(); } @Override public void setNextExpirationTime(long expTime) { maybeInitBuilder(); - builder.setNextExpiryTs(expTime); + builder.setNewExpiryTime(expTime); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java index 58baec2d841..063a878b7be 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationAttemptId.java @@ -38,6 +38,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; public abstract class ApplicationAttemptId implements Comparable { + public static final String appAttemptIdStrPrefix = "appattempt_"; + /** * Get the ApplicationId of the ApplicationAttempId. * @return ApplicationId of the ApplicationAttempId @@ -111,11 +113,11 @@ public abstract class ApplicationAttemptId implements @Override public String toString() { - StringBuilder sb = new StringBuilder("appattempt_"); + StringBuilder sb = new StringBuilder(appAttemptIdStrPrefix); sb.append(this.getApplicationId().getClusterTimestamp()).append("_"); sb.append(ApplicationId.appIdFormat.get().format( this.getApplicationId().getId())); sb.append("_").append(attemptIdFormat.get().format(getAttemptId())); return sb.toString(); } -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java index 21c6502e71e..45aa0157798 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationId.java @@ -38,6 +38,8 @@ import org.apache.hadoop.classification.InterfaceStability.Unstable; @Stable public abstract class ApplicationId implements Comparable { + public static final String appIdStrPrefix = "application_"; + /** * Get the short integer identifier of the ApplicationId * which is unique for all applications started by a particular instance @@ -88,7 +90,7 @@ public abstract class ApplicationId implements Comparable { @Override public String toString() { - return "application_" + this.getClusterTimestamp() + "_" + return appIdStrPrefix + this.getClusterTimestamp() + "_" + appIdFormat.get().format(getId()); } @@ -119,4 +121,4 @@ public abstract class ApplicationId implements Comparable { return false; return true; } -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java index c3103cb16e4..45adedbc18b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationMaster.java @@ -45,8 +45,8 @@ public interface ApplicationMaster { YarnApplicationState 
getState(); void setState(YarnApplicationState state); - String getClientToken(); - void setClientToken(String clientToken); + ClientToken getClientToken(); + void setClientToken(ClientToken clientToken); int getAMFailCount(); void setAMFailCount(int amFailCount); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java index 99cbcca0496..db68efde590 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ApplicationReport.java @@ -144,11 +144,11 @@ public interface ApplicationReport { */ @Public @Stable - String getClientToken(); + ClientToken getClientToken(); @Private @Unstable - void setClientToken(String clientToken); + void setClientToken(ClientToken clientToken); /** * Get the YarnApplicationState of the application. diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ClientToken.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ClientToken.java new file mode 100644 index 00000000000..92c4d9eb5d3 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ClientToken.java @@ -0,0 +1,39 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; + +/** + *

+ * ClientToken is the security token used by the AMs to verify
+ * authenticity of any client.
+ *
+ * The ResourceManager provides a secure token (via
+ * {@link ApplicationReport#getClientToken()}) which is verified by the
+ * ApplicationMaster when the client directly talks to an AM.
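+ *
+ * A hedged usage sketch (illustrative only, not part of this patch; "report"
+ * is assumed to be an ApplicationReport obtained from the RM):
+ * <pre>
+ *   ClientToken token = report.getClientToken();
+ *   if (token != null) {
+ *     // accessors defined on the common Token interface introduced in this patch
+ *     String kind = token.getKind();
+ *     String service = token.getService();
+ *   }
+ * </pre>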

+ * + */ +@Public +@Stable +public interface ClientToken extends Token {} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java index 97cf47d9620..76ea73d8aac 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/ContainerToken.java @@ -18,9 +18,6 @@ package org.apache.hadoop.yarn.api.records; -import java.nio.ByteBuffer; - -import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.yarn.api.AMRMProtocol; @@ -43,53 +40,4 @@ import org.apache.hadoop.yarn.api.ContainerManager; */ @Public @Stable -public interface ContainerToken extends DelegationToken { - /** - * Get the token identifier. - * @return token identifier - */ - @Public - @Stable - ByteBuffer getIdentifier(); - - @Private - @Stable - void setIdentifier(ByteBuffer identifier); - - /** - * Get the token password - * @return token password - */ - @Public - @Stable - ByteBuffer getPassword(); - - @Private - @Stable - void setPassword(ByteBuffer password); - - /** - * Get the token kind. - * @return token kind - */ - @Public - @Stable - String getKind(); - - @Private - @Stable - void setKind(String kind); - - /** - * Get the service to which the token is allocated. - * @return service to which the token is allocated - */ - @Public - @Stable - String getService(); - - @Private - @Stable - void setService(String service); - -} +public interface ContainerToken extends Token {} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/DelegationToken.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/DelegationToken.java index e6579ac30d1..a7870df930b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/DelegationToken.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/DelegationToken.java @@ -18,12 +18,8 @@ package org.apache.hadoop.yarn.api.records; -import java.nio.ByteBuffer; - -import org.apache.hadoop.classification.InterfaceAudience.Private; import org.apache.hadoop.classification.InterfaceAudience.Public; import org.apache.hadoop.classification.InterfaceStability.Evolving; -import org.apache.hadoop.classification.InterfaceStability.Stable; import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier; /** @@ -33,52 +29,4 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti */ @Public @Evolving -public interface DelegationToken { - /** - * Get the token identifier. - * @return token identifier - */ - @Public - @Stable - ByteBuffer getIdentifier(); - - @Private - @Stable - void setIdentifier(ByteBuffer identifier); - - /** - * Get the token password - * @return token password - */ - @Public - @Stable - ByteBuffer getPassword(); - - @Private - @Stable - void setPassword(ByteBuffer password); - - /** - * Get the token kind. 
- * @return token kind - */ - @Public - @Stable - String getKind(); - - @Private - @Stable - void setKind(String kind); - - /** - * Get the service to which the token is allocated. - * @return service to which the token is allocated - */ - @Public - @Stable - String getService(); - - @Private - @Stable - void setService(String service); -} +public interface DelegationToken extends Token {} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Token.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Token.java new file mode 100644 index 00000000000..addc234e036 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/Token.java @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceAudience.Public; +import org.apache.hadoop.classification.InterfaceStability.Stable; + +/** + *

Token is the security entity used by the framework + * to verify authenticity of any resource.
+ */ +@Public +@Stable +public interface Token { + /** + * Get the token identifier. + * @return token identifier + */ + @Public + @Stable + ByteBuffer getIdentifier(); + + @Private + @Stable + void setIdentifier(ByteBuffer identifier); + + /** + * Get the token password + * @return token password + */ + @Public + @Stable + ByteBuffer getPassword(); + + @Private + @Stable + void setPassword(ByteBuffer password); + + /** + * Get the token kind. + * @return token kind + */ + @Public + @Stable + String getKind(); + + @Private + @Stable + void setKind(String kind); + + /** + * Get the service to which the token is allocated. + * @return service to which the token is allocated + */ + @Public + @Stable + String getService(); + + @Private + @Stable + void setService(String service); + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationMasterPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationMasterPBImpl.java index 123e178806b..f136a4a506c 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationMasterPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationMasterPBImpl.java @@ -18,10 +18,11 @@ package org.apache.hadoop.yarn.api.records.impl.pb; - +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationMaster; import org.apache.hadoop.yarn.api.records.ApplicationStatus; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; @@ -31,15 +32,15 @@ import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; import org.apache.hadoop.yarn.util.ProtoUtils; - -public class ApplicationMasterPBImpl extends ProtoBase implements ApplicationMaster { +public class ApplicationMasterPBImpl extends ProtoBase + implements ApplicationMaster { ApplicationMasterProto proto = ApplicationMasterProto.getDefaultInstance(); ApplicationMasterProto.Builder builder = null; boolean viaProto = false; private ApplicationId applicationId = null; private ApplicationStatus applicationStatus = null; - + private ClientToken clientToken = null; public ApplicationMasterPBImpl() { builder = ApplicationMasterProto.newBuilder(); @@ -59,13 +60,22 @@ public class ApplicationMasterPBImpl extends ProtoBase i } private void mergeLocalToBuilder() { - if (this.applicationId != null && !((ApplicationIdPBImpl)this.applicationId).getProto().equals(builder.getApplicationId())) { + if (this.applicationId != null + && !((ApplicationIdPBImpl) this.applicationId).getProto().equals( + builder.getApplicationId())) { builder.setApplicationId(convertToProtoFormat(this.applicationId)); } - if (this.applicationStatus != null && !((ApplicationStatusPBImpl)this.applicationStatus).getProto().equals(builder.getStatus())) { + if (this.applicationStatus != null + && !((ApplicationStatusPBImpl) this.applicationStatus).getProto() + .equals(builder.getStatus())) { builder.setStatus(convertToProtoFormat(this.applicationStatus)); } + if (this.clientToken != null + && 
!((ClientTokenPBImpl) this.clientToken).getProto().equals( + builder.getClientToken())) { + builder.setClientToken(convertToProtoFormat(this.clientToken)); + } } private void mergeLocalToProto() { @@ -188,23 +198,26 @@ public class ApplicationMasterPBImpl extends ProtoBase i this.applicationStatus = status; } + @Override - public String getClientToken() { + public ClientToken getClientToken() { ApplicationMasterProtoOrBuilder p = viaProto ? proto : builder; + if (this.clientToken != null) { + return this.clientToken; + } if (!p.hasClientToken()) { return null; } - return (p.getClientToken()); + this.clientToken = convertFromProtoFormat(p.getClientToken()); + return this.clientToken; } - + @Override - public void setClientToken(String clientToken) { + public void setClientToken(ClientToken clientToken) { maybeInitBuilder(); - if (clientToken == null) { + if (clientToken == null) builder.clearClientToken(); - return; - } - builder.setClientToken((clientToken)); + this.clientToken = clientToken; } @Override @@ -271,4 +284,11 @@ public class ApplicationMasterPBImpl extends ProtoBase i return ((ApplicationStatusPBImpl)t).getProto(); } + private ClientTokenPBImpl convertFromProtoFormat(TokenProto p) { + return new ClientTokenPBImpl(p); + } + + private TokenProto convertToProtoFormat(ClientToken t) { + return ((ClientTokenPBImpl)t).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java index 8def3956785..69f939c088e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ApplicationReportPBImpl.java @@ -18,19 +18,21 @@ package org.apache.hadoop.yarn.api.records.impl.pb; +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; -import org.apache.hadoop.yarn.api.records.YarnApplicationState; -import org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; +import org.apache.hadoop.yarn.api.records.ClientToken; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; +import org.apache.hadoop.yarn.api.records.ProtoBase; +import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProtoOrBuilder; -import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; +import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; import org.apache.hadoop.yarn.util.ProtoUtils; @@ -40,8 +42,9 @@ implements ApplicationReport { ApplicationReportProto.Builder builder = null; boolean viaProto 
= false; - ApplicationId applicationId; - ApplicationAttemptId currentApplicationAttemptId; + private ApplicationId applicationId; + private ApplicationAttemptId currentApplicationAttemptId; + private ClientToken clientToken = null; public ApplicationReportPBImpl() { builder = ApplicationReportProto.newBuilder(); @@ -159,12 +162,16 @@ implements ApplicationReport { } @Override - public String getClientToken() { + public ClientToken getClientToken() { ApplicationReportProtoOrBuilder p = viaProto ? proto : builder; + if (this.clientToken != null) { + return this.clientToken; + } if (!p.hasClientToken()) { return null; } - return (p.getClientToken()); + this.clientToken = convertFromProtoFormat(p.getClientToken()); + return this.clientToken; } @Override @@ -176,7 +183,6 @@ implements ApplicationReport { return p.getUser(); } - @Override public String getDiagnostics() { ApplicationReportProtoOrBuilder p = viaProto ? proto : builder; @@ -290,13 +296,11 @@ implements ApplicationReport { } @Override - public void setClientToken(String clientToken) { + public void setClientToken(ClientToken clientToken) { maybeInitBuilder(); - if (clientToken == null) { + if (clientToken == null) builder.clearClientToken(); - return; - } - builder.setClientToken((clientToken)); + this.clientToken = clientToken; } @Override @@ -360,6 +364,11 @@ implements ApplicationReport { builder.getCurrentApplicationAttemptId())) { builder.setCurrentApplicationAttemptId(convertToProtoFormat(this.currentApplicationAttemptId)); } + if (this.clientToken != null + && !((ClientTokenPBImpl) this.clientToken).getProto().equals( + builder.getClientToken())) { + builder.setClientToken(convertToProtoFormat(this.clientToken)); + } } private void mergeLocalToProto() { @@ -419,4 +428,11 @@ implements ApplicationReport { return ProtoUtils.convertToProtoFormat(s); } + private ClientTokenPBImpl convertFromProtoFormat(TokenProto p) { + return new ClientTokenPBImpl(p); + } + + private TokenProto convertToProtoFormat(ClientToken t) { + return ((ClientTokenPBImpl)t).getProto(); + } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ClientTokenPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ClientTokenPBImpl.java new file mode 100644 index 00000000000..6f5c52857e7 --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ClientTokenPBImpl.java @@ -0,0 +1,33 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.api.records.impl.pb; + +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; +import org.apache.hadoop.yarn.api.records.ClientToken; + +public class ClientTokenPBImpl extends TokenPBImpl implements ClientToken { + + public ClientTokenPBImpl() { + super(); + } + + public ClientTokenPBImpl(TokenProto p) { + super(p); + } +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java index e2bd8de020e..92a710a20f4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerPBImpl.java @@ -18,7 +18,6 @@ package org.apache.hadoop.yarn.api.records.impl.pb; - import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; @@ -38,8 +37,6 @@ import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; import org.apache.hadoop.yarn.proto.YarnProtos.PriorityProto; import org.apache.hadoop.yarn.proto.YarnProtos.ResourceProto; import org.apache.hadoop.yarn.util.ProtoUtils; - - public class ContainerPBImpl extends ProtoBase implements Container { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerTokenPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerTokenPBImpl.java index eecc8363cd0..87676c59613 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerTokenPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ContainerTokenPBImpl.java @@ -18,149 +18,16 @@ package org.apache.hadoop.yarn.api.records.impl.pb; - -import java.nio.ByteBuffer; - -import org.apache.hadoop.yarn.api.records.ContainerToken; -import org.apache.hadoop.yarn.api.records.ProtoBase; import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder; +import org.apache.hadoop.yarn.api.records.ContainerToken; +public class ContainerTokenPBImpl extends TokenPBImpl implements ContainerToken { - -public class ContainerTokenPBImpl extends ProtoBase implements ContainerToken { - private TokenProto proto = TokenProto.getDefaultInstance(); - private TokenProto.Builder builder = null; - private boolean viaProto = false; - - private ByteBuffer identifier; - private ByteBuffer password; - - public ContainerTokenPBImpl() { - builder = TokenProto.newBuilder(); + super(); } - public ContainerTokenPBImpl(TokenProto proto) { - this.proto = proto; - viaProto = true; + public ContainerTokenPBImpl(TokenProto p) { + super(p); } - - public synchronized TokenProto getProto() { - mergeLocalToProto(); - proto = viaProto ? 
proto : builder.build(); - viaProto = true; - return proto; - } - - private synchronized void mergeLocalToBuilder() { - if (this.identifier != null) { - builder.setIdentifier(convertToProtoFormat(this.identifier)); - } - if (this.password != null) { - builder.setPassword(convertToProtoFormat(this.password)); - } - } - - private synchronized void mergeLocalToProto() { - if (viaProto) - maybeInitBuilder(); - mergeLocalToBuilder(); - proto = builder.build(); - viaProto = true; - } - - private synchronized void maybeInitBuilder() { - if (viaProto || builder == null) { - builder = TokenProto.newBuilder(proto); - } - viaProto = false; - } - - - @Override - public synchronized ByteBuffer getIdentifier() { - TokenProtoOrBuilder p = viaProto ? proto : builder; - if (this.identifier != null) { - return this.identifier; - } - if (!p.hasIdentifier()) { - return null; - } - this.identifier = convertFromProtoFormat(p.getIdentifier()); - return this.identifier; - } - - @Override - public synchronized void setIdentifier(ByteBuffer identifier) { - maybeInitBuilder(); - if (identifier == null) - builder.clearIdentifier(); - this.identifier = identifier; - } - @Override - public synchronized ByteBuffer getPassword() { - TokenProtoOrBuilder p = viaProto ? proto : builder; - if (this.password != null) { - return this.password; - } - if (!p.hasPassword()) { - return null; - } - this.password = convertFromProtoFormat(p.getPassword()); - return this.password; - } - - @Override - public synchronized void setPassword(ByteBuffer password) { - maybeInitBuilder(); - if (password == null) - builder.clearPassword(); - this.password = password; - } - @Override - public synchronized String getKind() { - TokenProtoOrBuilder p = viaProto ? proto : builder; - if (!p.hasKind()) { - return null; - } - return (p.getKind()); - } - - @Override - public synchronized void setKind(String kind) { - maybeInitBuilder(); - if (kind == null) { - builder.clearKind(); - return; - } - builder.setKind((kind)); - } - @Override - public synchronized String getService() { - TokenProtoOrBuilder p = viaProto ? 
proto : builder; - if (!p.hasService()) { - return null; - } - return (p.getService()); - } - - @Override - public synchronized void setService(String service) { - maybeInitBuilder(); - if (service == null) { - builder.clearService(); - return; - } - builder.setService((service)); - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append("ContainerToken { "); - sb.append("kind: ").append(getKind()).append(", "); - sb.append("service: ").append(getService()).append(" }"); - return sb.toString(); - } -} +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/DelegationTokenPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/DelegationTokenPBImpl.java index fae6d2483d8..4901cde7d00 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/DelegationTokenPBImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/DelegationTokenPBImpl.java @@ -18,139 +18,17 @@ package org.apache.hadoop.yarn.api.records.impl.pb; -import java.nio.ByteBuffer; - import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; -import org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder; import org.apache.hadoop.yarn.api.records.DelegationToken; -import org.apache.hadoop.yarn.api.records.ProtoBase; -public class DelegationTokenPBImpl extends ProtoBase - implements DelegationToken { - private TokenProto proto = TokenProto.getDefaultInstance(); - private TokenProto.Builder builder = null; - private boolean viaProto = false; - - private ByteBuffer identifier; - private ByteBuffer password; - - +public class DelegationTokenPBImpl extends TokenPBImpl implements + DelegationToken { + public DelegationTokenPBImpl() { - builder = TokenProto.newBuilder(); + super(); } - public DelegationTokenPBImpl(TokenProto proto) { - this.proto = proto; - viaProto = true; + public DelegationTokenPBImpl(TokenProto p) { + super(p); } - - public synchronized TokenProto getProto() { - mergeLocalToProto(); - proto = viaProto ? proto : builder.build(); - viaProto = true; - return proto; - } - - private synchronized void mergeLocalToBuilder() { - if (this.identifier != null) { - builder.setIdentifier(convertToProtoFormat(this.identifier)); - } - if (this.password != null) { - builder.setPassword(convertToProtoFormat(this.password)); - } - } - - private synchronized void mergeLocalToProto() { - if (viaProto) - maybeInitBuilder(); - mergeLocalToBuilder(); - proto = builder.build(); - viaProto = true; - } - - private synchronized void maybeInitBuilder() { - if (viaProto || builder == null) { - builder = TokenProto.newBuilder(proto); - } - viaProto = false; - } - - - @Override - public synchronized ByteBuffer getIdentifier() { - TokenProtoOrBuilder p = viaProto ? proto : builder; - if (this.identifier != null) { - return this.identifier; - } - if (!p.hasIdentifier()) { - return null; - } - this.identifier = convertFromProtoFormat(p.getIdentifier()); - return this.identifier; - } - - @Override - public synchronized void setIdentifier(ByteBuffer identifier) { - maybeInitBuilder(); - if (identifier == null) - builder.clearIdentifier(); - this.identifier = identifier; - } - @Override - public synchronized ByteBuffer getPassword() { - TokenProtoOrBuilder p = viaProto ? 
proto : builder; - if (this.password != null) { - return this.password; - } - if (!p.hasPassword()) { - return null; - } - this.password = convertFromProtoFormat(p.getPassword()); - return this.password; - } - - @Override - public synchronized void setPassword(ByteBuffer password) { - maybeInitBuilder(); - if (password == null) - builder.clearPassword(); - this.password = password; - } - @Override - public synchronized String getKind() { - TokenProtoOrBuilder p = viaProto ? proto : builder; - if (!p.hasKind()) { - return null; - } - return (p.getKind()); - } - - @Override - public synchronized void setKind(String kind) { - maybeInitBuilder(); - if (kind == null) { - builder.clearKind(); - return; - } - builder.setKind((kind)); - } - @Override - public synchronized String getService() { - TokenProtoOrBuilder p = viaProto ? proto : builder; - if (!p.hasService()) { - return null; - } - return (p.getService()); - } - - @Override - public synchronized void setService(String service) { - maybeInitBuilder(); - if (service == null) { - builder.clearService(); - return; - } - builder.setService((service)); - } - -} +} \ No newline at end of file diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java new file mode 100644 index 00000000000..fe304e03ada --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/TokenPBImpl.java @@ -0,0 +1,165 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.api.records.impl.pb; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.security.proto.SecurityProtos.TokenProto; +import org.apache.hadoop.security.proto.SecurityProtos.TokenProtoOrBuilder; +import org.apache.hadoop.yarn.api.records.ProtoBase; +import org.apache.hadoop.yarn.api.records.Token; + +public class TokenPBImpl extends ProtoBase implements + Token { + private TokenProto proto = TokenProto.getDefaultInstance(); + private TokenProto.Builder builder = null; + private boolean viaProto = false; + + private ByteBuffer identifier; + private ByteBuffer password; + + public TokenPBImpl() { + builder = TokenProto.newBuilder(); + } + + public TokenPBImpl(TokenProto proto) { + this.proto = proto; + viaProto = true; + } + + public synchronized TokenProto getProto() { + mergeLocalToProto(); + proto = viaProto ? 
proto : builder.build(); + viaProto = true; + return proto; + } + + private synchronized void mergeLocalToBuilder() { + if (this.identifier != null) { + builder.setIdentifier(convertToProtoFormat(this.identifier)); + } + if (this.password != null) { + builder.setPassword(convertToProtoFormat(this.password)); + } + } + + private synchronized void mergeLocalToProto() { + if (viaProto) + maybeInitBuilder(); + mergeLocalToBuilder(); + proto = builder.build(); + viaProto = true; + } + + private synchronized void maybeInitBuilder() { + if (viaProto || builder == null) { + builder = TokenProto.newBuilder(proto); + } + viaProto = false; + } + + @Override + public synchronized ByteBuffer getIdentifier() { + TokenProtoOrBuilder p = viaProto ? proto : builder; + if (this.identifier != null) { + return this.identifier; + } + if (!p.hasIdentifier()) { + return null; + } + this.identifier = convertFromProtoFormat(p.getIdentifier()); + return this.identifier; + } + + @Override + public synchronized void setIdentifier(ByteBuffer identifier) { + maybeInitBuilder(); + if (identifier == null) + builder.clearIdentifier(); + this.identifier = identifier; + } + + @Override + public synchronized ByteBuffer getPassword() { + TokenProtoOrBuilder p = viaProto ? proto : builder; + if (this.password != null) { + return this.password; + } + if (!p.hasPassword()) { + return null; + } + this.password = convertFromProtoFormat(p.getPassword()); + return this.password; + } + + @Override + public synchronized void setPassword(ByteBuffer password) { + maybeInitBuilder(); + if (password == null) + builder.clearPassword(); + this.password = password; + } + + @Override + public synchronized String getKind() { + TokenProtoOrBuilder p = viaProto ? proto : builder; + if (!p.hasKind()) { + return null; + } + return (p.getKind()); + } + + @Override + public synchronized void setKind(String kind) { + maybeInitBuilder(); + if (kind == null) { + builder.clearKind(); + return; + } + builder.setKind((kind)); + } + + @Override + public synchronized String getService() { + TokenProtoOrBuilder p = viaProto ? 
proto : builder; + if (!p.hasService()) { + return null; + } + return (p.getService()); + } + + @Override + public synchronized void setService(String service) { + maybeInitBuilder(); + if (service == null) { + builder.clearService(); + return; + } + builder.setService((service)); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append("Token { "); + sb.append("kind: ").append(getKind()).append(", "); + sb.append("service: ").append(getService()).append(" }"); + return sb.toString(); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java index 5a73eabce1d..0fea4aa33c1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/ProtoUtils.java @@ -28,7 +28,6 @@ import org.apache.hadoop.security.token.TokenIdentifier; import org.apache.hadoop.yarn.api.records.ApplicationAccessType; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ContainerState; -import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResourceType; import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; @@ -37,16 +36,16 @@ import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.QueueState; import org.apache.hadoop.yarn.api.records.YarnApplicationState; -import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.NodeIdPBImpl; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAccessTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationResourceUsageReportProto; import org.apache.hadoop.yarn.proto.YarnProtos.ContainerStateProto; -import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; -import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.FinalApplicationStatusProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceTypeProto; import org.apache.hadoop.yarn.proto.YarnProtos.LocalResourceVisibilityProto; +import org.apache.hadoop.yarn.proto.YarnProtos.NodeIdProto; +import org.apache.hadoop.yarn.proto.YarnProtos.NodeStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueACLProto; import org.apache.hadoop.yarn.proto.YarnProtos.QueueStateProto; import org.apache.hadoop.yarn.proto.YarnProtos.YarnApplicationStateProto; @@ -206,8 +205,9 @@ public class ProtoUtils { * @param serviceAddr the connect address for the service * @return rpc token */ - public static Token - convertFromProtoFormat(DelegationToken protoToken, InetSocketAddress serviceAddr) { + public static Token convertFromProtoFormat( + org.apache.hadoop.yarn.api.records.Token protoToken, + InetSocketAddress serviceAddr) { Token token = new Token(protoToken.getIdentifier().array(), protoToken.getPassword().array(), new Text(protoToken.getKind()), diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto index 7495ce8784f..5aa2380ae5f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/client_RM_protocol.proto @@ -21,6 +21,7 @@ option java_outer_classname = "ClientRMProtocol"; option java_generic_services = true; option java_generate_equals_and_hash = true; +import "Security.proto"; import "yarn_service_protos.proto"; service ClientRMProtocolService { @@ -33,8 +34,8 @@ service ClientRMProtocolService { rpc getClusterNodes (GetClusterNodesRequestProto) returns (GetClusterNodesResponseProto); rpc getQueueInfo (GetQueueInfoRequestProto) returns (GetQueueInfoResponseProto); rpc getQueueUserAcls (GetQueueUserAclsInfoRequestProto) returns (GetQueueUserAclsInfoResponseProto); - rpc getDelegationToken(GetDelegationTokenRequestProto) returns (GetDelegationTokenResponseProto); - rpc renewDelegationToken(RenewDelegationTokenRequestProto) returns (RenewDelegationTokenResponseProto); - rpc cancelDelegationToken(CancelDelegationTokenRequestProto) returns (CancelDelegationTokenResponseProto); + rpc getDelegationToken(hadoop.common.GetDelegationTokenRequestProto) returns (hadoop.common.GetDelegationTokenResponseProto); + rpc renewDelegationToken(hadoop.common.RenewDelegationTokenRequestProto) returns (hadoop.common.RenewDelegationTokenResponseProto); + rpc cancelDelegationToken(hadoop.common.CancelDelegationTokenRequestProto) returns (hadoop.common.CancelDelegationTokenResponseProto); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto index f6ce67f0b05..3fe519f8579 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_protos.proto @@ -102,7 +102,7 @@ message ApplicationMasterProto { optional string trackingUrl = 4; optional ApplicationStatusProto status = 5; optional YarnApplicationStateProto state = 6; - optional string client_token = 7; + optional hadoop.common.TokenProto client_token = 7; optional int32 containerCount = 8; optional int32 amFailCount = 9; optional string diagnostics = 10 [default = ""]; @@ -151,7 +151,7 @@ message ApplicationReportProto { optional string name = 4; optional string host = 5; optional int32 rpc_port = 6; - optional string client_token = 7; + optional hadoop.common.TokenProto client_token = 7; optional ApplicationStatusProto status = 8; optional YarnApplicationStateProto yarn_application_state = 9; optional ContainerProto masterContainer = 10; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto index 637cb17c838..2c59e9fe5d6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto/yarn_service_protos.proto @@ -140,30 +140,6 @@ message GetQueueUserAclsInfoResponseProto { } -message GetDelegationTokenRequestProto { - optional string renewer = 1; -} - -message GetDelegationTokenResponseProto { - optional hadoop.common.TokenProto application_token = 1; -} - -message RenewDelegationTokenRequestProto { - required hadoop.common.TokenProto delegation_token = 1; -} - -message 
RenewDelegationTokenResponseProto { - required int64 next_expiry_ts = 1; -} - -message CancelDelegationTokenRequestProto { - required hadoop.common.TokenProto delegation_token = 1; -} - -message CancelDelegationTokenResponseProto { -} - - ////////////////////////////////////////////////////// /////// client_NM_Protocol /////////////////////////// ////////////////////////////////////////////////////// diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java index 4323962e807..71a81b0496e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/ApplicationMaster.java @@ -29,7 +29,6 @@ import java.util.HashMap; import java.util.List; import java.util.Map; import java.util.Vector; -import java.util.concurrent.CopyOnWriteArrayList; import java.util.concurrent.atomic.AtomicInteger; import org.apache.commons.cli.CommandLine; @@ -51,9 +50,6 @@ import org.apache.hadoop.yarn.api.ContainerManager; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse; import org.apache.hadoop.yarn.api.protocolrecords.FinishApplicationMasterRequest; -//import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest; -//import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse; -import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterRequest; import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse; import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest; @@ -71,6 +67,9 @@ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.client.AMRMClient; +import org.apache.hadoop.yarn.client.AMRMClient.ContainerRequest; +import org.apache.hadoop.yarn.client.AMRMClientImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.ipc.YarnRPC; @@ -78,37 +77,64 @@ import org.apache.hadoop.yarn.util.ConverterUtils; import org.apache.hadoop.yarn.util.Records; /** - * An ApplicationMaster for executing shell commands on a set of launched containers using the YARN framework. + * An ApplicationMaster for executing shell commands on a set of launched + * containers using the YARN framework. * - *

<p> This class is meant to act as an example on how to write yarn-based application masters. </p>
+ * <p>
+ * This class is meant to act as an example on how to write yarn-based
+ * application masters.
+ * </p>
 * 
- * <p> The ApplicationMaster is started on a container by the ResourceManager's launcher.
- * The first thing that the ApplicationMaster needs to do is to connect and register itself with
- * the ResourceManager. The registration sets up information within the ResourceManager
- * regarding what host:port the ApplicationMaster is listening on to provide any form of functionality to a client
- * as well as a tracking url that a client can use to keep track of status/job history if needed. </p>
+ * <p>
+ * The ApplicationMaster is started on a container by the
+ * ResourceManager's launcher. The first thing that the
+ * ApplicationMaster needs to do is to connect and register itself
+ * with the ResourceManager. The registration sets up information
+ * within the ResourceManager regarding what host:port the
+ * ApplicationMaster is listening on to provide any form of functionality to a
+ * client as well as a tracking url that a client can use to keep track of
+ * status/job history if needed.
+ * </p>
 * 
- * <p> The ApplicationMaster needs to send a heartbeat to the ResourceManager at regular intervals
- * to inform the ResourceManager that it is up and alive. The {@link AMRMProtocol#allocate} to the
- * ResourceManager from the ApplicationMaster acts as a heartbeat.
+ * <p>
+ * The ApplicationMaster needs to send a heartbeat to the
+ * ResourceManager at regular intervals to inform the
+ * ResourceManager that it is up and alive. The
+ * {@link AMRMProtocol#allocate} to the ResourceManager from the
+ * ApplicationMaster acts as a heartbeat.
 * 
- * <p> For the actual handling of the job, the ApplicationMaster has to request the
- * ResourceManager via {@link AllocateRequest} for the required no. of containers using {@link ResourceRequest}
- * with the necessary resource specifications such as node location, computational (memory/disk/cpu) resource requirements.
- * The ResourceManager responds with an {@link AllocateResponse} that informs the ApplicationMaster
- * of the set of newly allocated containers, completed containers as well as current state of available resources. </p>
+ * <p>
+ * For the actual handling of the job, the ApplicationMaster has to
+ * request the ResourceManager via {@link AllocateRequest} for the
+ * required no. of containers using {@link ResourceRequest} with the necessary
+ * resource specifications such as node location, computational
+ * (memory/disk/cpu) resource requirements. The ResourceManager
+ * responds with an {@link AllocateResponse} that informs the
+ * ApplicationMaster of the set of newly allocated containers,
+ * completed containers as well as current state of available resources.
+ * </p>
 * 
- * <p> For each allocated container, the ApplicationMaster can then set up the necessary launch context via
- * {@link ContainerLaunchContext} to specify the allocated container id, local resources required by the executable,
- * the environment to be setup for the executable, commands to execute, etc. and submit a {@link StartContainerRequest}
- * to the {@link ContainerManager} to launch and execute the defined commands on the given allocated container. </p>
- * 
- * <p> The ApplicationMaster can monitor the launched container by either querying the ResourceManager
- * using {@link AMRMProtocol#allocate} to get updates on completed containers or via the {@link ContainerManager}
- * by querying for the status of the allocated container's {@link ContainerId}.
+ * <p>
+ * For each allocated container, the ApplicationMaster can then set
+ * up the necessary launch context via {@link ContainerLaunchContext} to specify
+ * the allocated container id, local resources required by the executable, the
+ * environment to be setup for the executable, commands to execute, etc. and
+ * submit a {@link StartContainerRequest} to the {@link ContainerManager} to
+ * launch and execute the defined commands on the given allocated container.
+ * </p>
 * 
+ * <p>
+ * The ApplicationMaster can monitor the launched container by
+ * either querying the ResourceManager using
+ * {@link AMRMProtocol#allocate} to get updates on completed containers or via
+ * the {@link ContainerManager} by querying for the status of the allocated
+ * container's {@link ContainerId}.
+ *
+ * <p>
+ * After the job has been completed, the ApplicationMaster has to + * send a {@link FinishApplicationMasterRequest} to the + * ResourceManager to inform it that the + * ApplicationMaster has been completed. */ @InterfaceAudience.Public @InterfaceStability.Unstable @@ -116,61 +142,58 @@ public class ApplicationMaster { private static final Log LOG = LogFactory.getLog(ApplicationMaster.class); - // Configuration + // Configuration private Configuration conf; // YARN RPC to communicate with the Resource Manager or Node Manager private YarnRPC rpc; // Handle to communicate with the Resource Manager - private AMRMProtocol resourceManager; + private AMRMClient resourceManager; // Application Attempt Id ( combination of attemptId and fail count ) private ApplicationAttemptId appAttemptID; // TODO // For status update for clients - yet to be implemented - // Hostname of the container + // Hostname of the container private String appMasterHostname = ""; - // Port on which the app master listens for status update requests from clients + // Port on which the app master listens for status updates from clients private int appMasterRpcPort = 0; - // Tracking url to which app master publishes info for clients to monitor + // Tracking url to which app master publishes info for clients to monitor private String appMasterTrackingUrl = ""; // App Master configuration // No. of containers to run shell command on private int numTotalContainers = 1; - // Memory to request for the container on which the shell command will run + // Memory to request for the container on which the shell command will run private int containerMemory = 10; // Priority of the request - private int requestPriority; - - // Incremental counter for rpc calls to the RM - private AtomicInteger rmRequestID = new AtomicInteger(); + private int requestPriority; // Simple flag to denote whether all works is done - private boolean appDone = false; + private boolean appDone = false; // Counter for completed containers ( complete denotes successful or failed ) private AtomicInteger numCompletedContainers = new AtomicInteger(); // Allocated container count so that we know how many containers has the RM // allocated to us private AtomicInteger numAllocatedContainers = new AtomicInteger(); - // Count of failed containers + // Count of failed containers private AtomicInteger numFailedContainers = new AtomicInteger(); // Count of containers already requested from the RM - // Needed as once requested, we should not request for containers again and again. - // Only request for more if the original requirement changes. + // Needed as once requested, we should not request for containers again. + // Only request for more if the original requirement changes. 
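The change above replaces the hand-built AMRMProtocol requests with the AMRMClient helper. A minimal sketch of that lifecycle, using only calls that appear in this patch and assuming the surrounding class's fields (appAttemptID, conf, appMasterHostname, appMasterRpcPort, appMasterTrackingUrl, requestPriority, containerMemory, numTotalContainers), with the polling loop and error handling elided, could look like this:

    // Sketch of the AMRMClient-based lifecycle used by the new run() method.
    AMRMClient resourceManager = new AMRMClientImpl(appAttemptID);
    resourceManager.init(conf);
    resourceManager.start();
    try {
      // Register with the RM; the response reports the cluster's min/max resources.
      RegisterApplicationMasterResponse response =
          resourceManager.registerApplicationMaster(appMasterHostname,
              appMasterRpcPort, appMasterTrackingUrl);

      // Ask for containers: capability + priority + count, no node/rack constraints.
      Priority pri = Records.newRecord(Priority.class);
      pri.setPriority(requestPriority);
      Resource capability = Records.newRecord(Resource.class);
      capability.setMemory(containerMemory);
      resourceManager.addContainerRequest(new AMRMClient.ContainerRequest(
          capability, null, null, pri, numTotalContainers));

      // ... poll the RM (the allocate call doubles as the heartbeat), launch
      // containers, and track completions, as in the loop above ...

      // Report the final status instead of building a
      // FinishApplicationMasterRequest by hand.
      resourceManager.unregisterApplicationMaster(
          FinalApplicationStatus.SUCCEEDED, null, null);
    } finally {
      resourceManager.stop();
    }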
private AtomicInteger numRequestedContainers = new AtomicInteger(); - // Shell command to be executed - private String shellCommand = ""; + // Shell command to be executed + private String shellCommand = ""; // Args to be passed to the shell command private String shellArgs = ""; - // Env variables to be setup for the shell command + // Env variables to be setup for the shell command private Map shellEnv = new HashMap(); // Location of shell script ( obtained from info set in env ) // Shell script path in fs - private String shellScriptPath = ""; + private String shellScriptPath = ""; // Timestamp needed for creating a local resource private long shellScriptPathTimestamp = 0; // File length needed for local resource @@ -179,9 +202,6 @@ public class ApplicationMaster { // Hardcoded path to shell script in launch container's local env private final String ExecShellStringPath = "ExecShellScript.sh"; - // Containers to be released - private CopyOnWriteArrayList releasedContainers = new CopyOnWriteArrayList(); - // Launch threads private List launchThreads = new ArrayList(); @@ -205,8 +225,7 @@ public class ApplicationMaster { if (result) { LOG.info("Application Master completed successfully. exiting"); System.exit(0); - } - else { + } else { LOG.info("Application Master failed. exiting"); System.exit(2); } @@ -221,7 +240,8 @@ public class ApplicationMaster { Map envs = System.getenv(); for (Map.Entry env : envs.entrySet()) { LOG.info("System env: key=" + env.getKey() + ", val=" + env.getValue()); - System.out.println("System env: key=" + env.getKey() + ", val=" + env.getValue()); + System.out.println("System env: key=" + env.getKey() + ", val=" + + env.getValue()); } String cmd = "ls -al"; @@ -231,9 +251,10 @@ public class ApplicationMaster { pr = run.exec(cmd); pr.waitFor(); - BufferedReader buf = new BufferedReader(new InputStreamReader(pr.getInputStream())); + BufferedReader buf = new BufferedReader(new InputStreamReader( + pr.getInputStream())); String line = ""; - while ((line=buf.readLine())!=null) { + while ((line = buf.readLine()) != null) { LOG.info("System CWD content: " + line); System.out.println("System CWD content: " + line); } @@ -242,31 +263,39 @@ public class ApplicationMaster { e.printStackTrace(); } catch (InterruptedException e) { e.printStackTrace(); - } + } } public ApplicationMaster() throws Exception { // Set up the configuration and RPC - conf = new Configuration(); + conf = new YarnConfiguration(); rpc = YarnRPC.create(conf); } + /** * Parse command line options - * @param args Command line args - * @return Whether init successful and run should be invoked + * + * @param args Command line args + * @return Whether init successful and run should be invoked * @throws ParseException - * @throws IOException + * @throws IOException */ public boolean init(String[] args) throws ParseException, IOException { Options opts = new Options(); - opts.addOption("app_attempt_id", true, "App Attempt ID. Not to be used unless for testing purposes"); - opts.addOption("shell_command", true, "Shell command to be executed by the Application Master"); - opts.addOption("shell_script", true, "Location of the shell script to be executed"); + opts.addOption("app_attempt_id", true, + "App Attempt ID. 
Not to be used unless for testing purposes"); + opts.addOption("shell_command", true, + "Shell command to be executed by the Application Master"); + opts.addOption("shell_script", true, + "Location of the shell script to be executed"); opts.addOption("shell_args", true, "Command line args for the shell script"); - opts.addOption("shell_env", true, "Environment for shell script. Specified as env_key=env_val pairs"); - opts.addOption("container_memory", true, "Amount of memory in MB to be requested to run the shell command"); - opts.addOption("num_containers", true, "No. of containers on which the shell command needs to be executed"); + opts.addOption("shell_env", true, + "Environment for shell script. Specified as env_key=env_val pairs"); + opts.addOption("container_memory", true, + "Amount of memory in MB to be requested to run the shell command"); + opts.addOption("num_containers", true, + "No. of containers on which the shell command needs to be executed"); opts.addOption("priority", true, "Application Priority. Default 0"); opts.addOption("debug", false, "Dump out debug information"); @@ -275,7 +304,8 @@ public class ApplicationMaster { if (args.length == 0) { printUsage(opts); - throw new IllegalArgumentException("No args specified for application master to initialize"); + throw new IllegalArgumentException( + "No args specified for application master to initialize"); } if (cliParser.hasOption("help")) { @@ -289,7 +319,6 @@ public class ApplicationMaster { Map envs = System.getenv(); - appAttemptID = Records.newRecord(ApplicationAttemptId.class); if (envs.containsKey(ApplicationConstants.AM_APP_ATTEMPT_ID_ENV)) { appAttemptID = ConverterUtils.toApplicationAttemptId(envs .get(ApplicationConstants.AM_APP_ATTEMPT_ID_ENV)); @@ -297,29 +326,31 @@ public class ApplicationMaster { if (cliParser.hasOption("app_attempt_id")) { String appIdStr = cliParser.getOptionValue("app_attempt_id", ""); appAttemptID = ConverterUtils.toApplicationAttemptId(appIdStr); - } - else { - throw new IllegalArgumentException("Application Attempt Id not set in the environment"); + } else { + throw new IllegalArgumentException( + "Application Attempt Id not set in the environment"); } } else { - ContainerId containerId = ConverterUtils.toContainerId(envs.get(ApplicationConstants.AM_CONTAINER_ID_ENV)); + ContainerId containerId = ConverterUtils.toContainerId(envs + .get(ApplicationConstants.AM_CONTAINER_ID_ENV)); appAttemptID = containerId.getApplicationAttemptId(); } - LOG.info("Application master for app" - + ", appId=" + appAttemptID.getApplicationId().getId() - + ", clustertimestamp=" + appAttemptID.getApplicationId().getClusterTimestamp() + LOG.info("Application master for app" + ", appId=" + + appAttemptID.getApplicationId().getId() + ", clustertimestamp=" + + appAttemptID.getApplicationId().getClusterTimestamp() + ", attemptId=" + appAttemptID.getAttemptId()); if (!cliParser.hasOption("shell_command")) { - throw new IllegalArgumentException("No shell command specified to be executed by application master"); + throw new IllegalArgumentException( + "No shell command specified to be executed by application master"); } shellCommand = cliParser.getOptionValue("shell_command"); if (cliParser.hasOption("shell_args")) { shellArgs = cliParser.getOptionValue("shell_args"); } - if (cliParser.hasOption("shell_env")) { + if (cliParser.hasOption("shell_env")) { String shellEnvs[] = cliParser.getOptionValues("shell_env"); for (String env : shellEnvs) { env = env.trim(); @@ -330,8 +361,8 @@ public class ApplicationMaster { } 
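The attempt id resolution performed in init() can be summarized by the following sketch, a minimal illustration built from the environment variables and converters shown in the patch (the testing-only app_attempt_id option is left out):

    // Resolve the ApplicationAttemptId the AM is running under: prefer the
    // attempt id env var, otherwise derive it from the AM's container id.
    Map<String, String> envs = System.getenv();
    ApplicationAttemptId attemptId;
    if (envs.containsKey(ApplicationConstants.AM_APP_ATTEMPT_ID_ENV)) {
      attemptId = ConverterUtils.toApplicationAttemptId(
          envs.get(ApplicationConstants.AM_APP_ATTEMPT_ID_ENV));
    } else {
      ContainerId containerId = ConverterUtils.toContainerId(
          envs.get(ApplicationConstants.AM_CONTAINER_ID_ENV));
      attemptId = containerId.getApplicationAttemptId();
    }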
String key = env.substring(0, index); String val = ""; - if (index < (env.length()-1)) { - val = env.substring(index+1); + if (index < (env.length() - 1)) { + val = env.substring(index + 1); } shellEnv.put(key, val); } @@ -341,32 +372,37 @@ public class ApplicationMaster { shellScriptPath = envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLOCATION); if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP)) { - shellScriptPathTimestamp = Long.valueOf(envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP)); + shellScriptPathTimestamp = Long.valueOf(envs + .get(DSConstants.DISTRIBUTEDSHELLSCRIPTTIMESTAMP)); } if (envs.containsKey(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN)) { - shellScriptPathLen = Long.valueOf(envs.get(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN)); + shellScriptPathLen = Long.valueOf(envs + .get(DSConstants.DISTRIBUTEDSHELLSCRIPTLEN)); } if (!shellScriptPath.isEmpty() - && (shellScriptPathTimestamp <= 0 - || shellScriptPathLen <= 0)) { - LOG.error("Illegal values in env for shell script path" - + ", path=" + shellScriptPath - + ", len=" + shellScriptPathLen - + ", timestamp=" + shellScriptPathTimestamp); - throw new IllegalArgumentException("Illegal values in env for shell script path"); + && (shellScriptPathTimestamp <= 0 || shellScriptPathLen <= 0)) { + LOG.error("Illegal values in env for shell script path" + ", path=" + + shellScriptPath + ", len=" + shellScriptPathLen + ", timestamp=" + + shellScriptPathTimestamp); + throw new IllegalArgumentException( + "Illegal values in env for shell script path"); } } - containerMemory = Integer.parseInt(cliParser.getOptionValue("container_memory", "10")); - numTotalContainers = Integer.parseInt(cliParser.getOptionValue("num_containers", "1")); - requestPriority = Integer.parseInt(cliParser.getOptionValue("priority", "0")); + containerMemory = Integer.parseInt(cliParser.getOptionValue( + "container_memory", "10")); + numTotalContainers = Integer.parseInt(cliParser.getOptionValue( + "num_containers", "1")); + requestPriority = Integer.parseInt(cliParser + .getOptionValue("priority", "0")); return true; } /** - * Helper function to print usage + * Helper function to print usage + * * @param opts Parsed command line options */ private void printUsage(Options opts) { @@ -375,228 +411,240 @@ public class ApplicationMaster { /** * Main run function for the application master + * * @throws YarnRemoteException */ public boolean run() throws YarnRemoteException { LOG.info("Starting ApplicationMaster"); // Connect to ResourceManager - resourceManager = connectToRM(); + resourceManager = new AMRMClientImpl(appAttemptID); + resourceManager.init(conf); + resourceManager.start(); - // Setup local RPC Server to accept status requests directly from clients - // TODO need to setup a protocol for client to be able to communicate to the RPC server - // TODO use the rpc port info to register with the RM for the client to send requests to this app master + try { + // Setup local RPC Server to accept status requests directly from clients + // TODO need to setup a protocol for client to be able to communicate to + // the RPC server + // TODO use the rpc port info to register with the RM for the client to + // send requests to this app master - // Register self with ResourceManager - RegisterApplicationMasterResponse response = registerToRM(); - // Dump out information about cluster capability as seen by the resource manager - int minMem = response.getMinimumResourceCapability().getMemory(); - int maxMem = response.getMaximumResourceCapability().getMemory(); - 
LOG.info("Min mem capabililty of resources in this cluster " + minMem); - LOG.info("Max mem capabililty of resources in this cluster " + maxMem); + // Register self with ResourceManager + RegisterApplicationMasterResponse response = resourceManager + .registerApplicationMaster(appMasterHostname, appMasterRpcPort, + appMasterTrackingUrl); + // Dump out information about cluster capability as seen by the + // resource manager + int minMem = response.getMinimumResourceCapability().getMemory(); + int maxMem = response.getMaximumResourceCapability().getMemory(); + LOG.info("Min mem capabililty of resources in this cluster " + minMem); + LOG.info("Max mem capabililty of resources in this cluster " + maxMem); - // A resource ask has to be atleast the minimum of the capability of the cluster, the value has to be - // a multiple of the min value and cannot exceed the max. - // If it is not an exact multiple of min, the RM will allocate to the nearest multiple of min - if (containerMemory < minMem) { - LOG.info("Container memory specified below min threshold of cluster. Using min value." - + ", specified=" + containerMemory - + ", min=" + minMem); - containerMemory = minMem; - } - else if (containerMemory > maxMem) { - LOG.info("Container memory specified above max threshold of cluster. Using max value." - + ", specified=" + containerMemory - + ", max=" + maxMem); - containerMemory = maxMem; - } - - // Setup heartbeat emitter - // TODO poll RM every now and then with an empty request to let RM know that we are alive - // The heartbeat interval after which an AM is timed out by the RM is defined by a config setting: - // RM_AM_EXPIRY_INTERVAL_MS with default defined by DEFAULT_RM_AM_EXPIRY_INTERVAL_MS - // The allocate calls to the RM count as heartbeats so, for now, this additional heartbeat emitter - // is not required. - - // Setup ask for containers from RM - // Send request for containers to RM - // Until we get our fully allocated quota, we keep on polling RM for containers - // Keep looping until all the containers are launched and shell script executed on them - // ( regardless of success/failure). - - int loopCounter = -1; - - while (numCompletedContainers.get() < numTotalContainers - && !appDone) { - loopCounter++; - - // log current state - LOG.info("Current application state: loop=" + loopCounter - + ", appDone=" + appDone - + ", total=" + numTotalContainers - + ", requested=" + numRequestedContainers - + ", completed=" + numCompletedContainers - + ", failed=" + numFailedContainers - + ", currentAllocated=" + numAllocatedContainers); - - // Sleep before each loop when asking RM for containers - // to avoid flooding RM with spurious requests when it - // need not have any available containers - // Sleeping for 1000 ms. - try { - Thread.sleep(1000); - } catch (InterruptedException e) { - LOG.info("Sleep interrupted " + e.getMessage()); + // A resource ask has to be atleast the minimum of the capability of the + // cluster, the value has to be a multiple of the min value and cannot + // exceed the max. + // If it is not an exact multiple of min, the RM will allocate to the + // nearest multiple of min + if (containerMemory < minMem) { + LOG.info("Container memory specified below min threshold of cluster." + + " Using min value." + ", specified=" + containerMemory + ", min=" + + minMem); + containerMemory = minMem; + } else if (containerMemory > maxMem) { + LOG.info("Container memory specified above max threshold of cluster." + + " Using max value." 
+ ", specified=" + containerMemory + ", max=" + + maxMem); + containerMemory = maxMem; } - // No. of containers to request - // For the first loop, askCount will be equal to total containers needed - // From that point on, askCount will always be 0 as current implementation - // does not change its ask on container failures. - int askCount = numTotalContainers - numRequestedContainers.get(); - numRequestedContainers.addAndGet(askCount); + // Setup heartbeat emitter + // TODO poll RM every now and then with an empty request to let RM know + // that we are alive + // The heartbeat interval after which an AM is timed out by the RM is + // defined by a config setting: + // RM_AM_EXPIRY_INTERVAL_MS with default defined by + // DEFAULT_RM_AM_EXPIRY_INTERVAL_MS + // The allocate calls to the RM count as heartbeats so, for now, + // this additional heartbeat emitter is not required. - // Setup request to be sent to RM to allocate containers - List resourceReq = new ArrayList(); - if (askCount > 0) { - ResourceRequest containerAsk = setupContainerAskForRM(askCount); - resourceReq.add(containerAsk); - } + // Setup ask for containers from RM + // Send request for containers to RM + // Until we get our fully allocated quota, we keep on polling RM for + // containers + // Keep looping until all the containers are launched and shell script + // executed on them ( regardless of success/failure). - // Send the request to RM - LOG.info("Asking RM for containers" - + ", askCount=" + askCount); - AMResponse amResp =sendContainerAskToRM(resourceReq); + int loopCounter = -1; - // Retrieve list of allocated containers from the response - List allocatedContainers = amResp.getAllocatedContainers(); - LOG.info("Got response from RM for container ask, allocatedCnt=" + allocatedContainers.size()); - numAllocatedContainers.addAndGet(allocatedContainers.size()); - for (Container allocatedContainer : allocatedContainers) { - LOG.info("Launching shell command on a new container." - + ", containerId=" + allocatedContainer.getId() - + ", containerNode=" + allocatedContainer.getNodeId().getHost() - + ":" + allocatedContainer.getNodeId().getPort() - + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() - + ", containerState" + allocatedContainer.getState() - + ", containerResourceMemory" + allocatedContainer.getResource().getMemory()); - //+ ", containerToken" + allocatedContainer.getContainerToken().getIdentifier().toString()); + while (numCompletedContainers.get() < numTotalContainers && !appDone) { + loopCounter++; - LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable(allocatedContainer); - Thread launchThread = new Thread(runnableLaunchContainer); + // log current state + LOG.info("Current application state: loop=" + loopCounter + + ", appDone=" + appDone + ", total=" + numTotalContainers + + ", requested=" + numRequestedContainers + ", completed=" + + numCompletedContainers + ", failed=" + numFailedContainers + + ", currentAllocated=" + numAllocatedContainers); - // launch and start the container on a separate thread to keep the main thread unblocked - // as all containers may not be allocated at one go. - launchThreads.add(launchThread); - launchThread.start(); - } + // Sleep before each loop when asking RM for containers + // to avoid flooding RM with spurious requests when it + // need not have any available containers + // Sleeping for 1000 ms. 
+ try { + Thread.sleep(1000); + } catch (InterruptedException e) { + LOG.info("Sleep interrupted " + e.getMessage()); + } - // Check what the current available resources in the cluster are - // TODO should we do anything if the available resources are not enough? - Resource availableResources = amResp.getAvailableResources(); - LOG.info("Current available resources in the cluster " + availableResources); + // No. of containers to request + // For the first loop, askCount will be equal to total containers needed + // From that point on, askCount will always be 0 as current + // implementation does not change its ask on container failures. + int askCount = numTotalContainers - numRequestedContainers.get(); + numRequestedContainers.addAndGet(askCount); - // Check the completed containers - List completedContainers = amResp.getCompletedContainersStatuses(); - LOG.info("Got response from RM for container ask, completedCnt=" + completedContainers.size()); - for (ContainerStatus containerStatus : completedContainers) { - LOG.info("Got container status for containerID= " + containerStatus.getContainerId() - + ", state=" + containerStatus.getState() - + ", exitStatus=" + containerStatus.getExitStatus() - + ", diagnostics=" + containerStatus.getDiagnostics()); + if (askCount > 0) { + ContainerRequest containerAsk = setupContainerAskForRM(askCount); + resourceManager.addContainerRequest(containerAsk); + } - // non complete containers should not be here - assert(containerStatus.getState() == ContainerState.COMPLETE); + // Send the request to RM + LOG.info("Asking RM for containers" + ", askCount=" + askCount); + AMResponse amResp = sendContainerAskToRM(); - // increment counters for completed/failed containers - int exitStatus = containerStatus.getExitStatus(); - if (0 != exitStatus) { - // container failed - if (-100 != exitStatus) { - // shell script failed - // counts as completed + // Retrieve list of allocated containers from the response + List allocatedContainers = amResp.getAllocatedContainers(); + LOG.info("Got response from RM for container ask, allocatedCnt=" + + allocatedContainers.size()); + numAllocatedContainers.addAndGet(allocatedContainers.size()); + for (Container allocatedContainer : allocatedContainers) { + LOG.info("Launching shell command on a new container." + + ", containerId=" + allocatedContainer.getId() + + ", containerNode=" + allocatedContainer.getNodeId().getHost() + + ":" + allocatedContainer.getNodeId().getPort() + + ", containerNodeURI=" + allocatedContainer.getNodeHttpAddress() + + ", containerState" + allocatedContainer.getState() + + ", containerResourceMemory" + + allocatedContainer.getResource().getMemory()); + // + ", containerToken" + // +allocatedContainer.getContainerToken().getIdentifier().toString()); + + LaunchContainerRunnable runnableLaunchContainer = new LaunchContainerRunnable( + allocatedContainer); + Thread launchThread = new Thread(runnableLaunchContainer); + + // launch and start the container on a separate thread to keep + // the main thread unblocked + // as all containers may not be allocated at one go. + launchThreads.add(launchThread); + launchThread.start(); + } + + // Check what the current available resources in the cluster are + // TODO should we do anything if the available resources are not enough? 
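One round of the ask/response loop consumes the AMResponse roughly as follows. This condensed sketch assumes the counters, launchThreads list and LaunchContainerRunnable of the surrounding class, and mirrors the exit-status handling shown in the patch (-100 marks a lost container that should be re-requested):

    // Launch a thread per newly allocated container; allocations arrive in batches.
    for (Container c : amResp.getAllocatedContainers()) {
      Thread t = new Thread(new LaunchContainerRunnable(c));
      launchThreads.add(t);
      t.start();
    }
    // Update counters from the completed-container statuses.
    for (ContainerStatus status : amResp.getCompletedContainersStatuses()) {
      int exitStatus = status.getExitStatus();
      if (exitStatus == 0) {
        numCompletedContainers.incrementAndGet();   // shell command succeeded
      } else if (exitStatus != -100) {
        numCompletedContainers.incrementAndGet();   // shell command failed
        numFailedContainers.incrementAndGet();
      } else {
        numAllocatedContainers.decrementAndGet();   // container lost; ask again
        numRequestedContainers.decrementAndGet();
      }
    }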
+ Resource availableResources = amResp.getAvailableResources(); + LOG.info("Current available resources in the cluster " + + availableResources); + + // Check the completed containers + List completedContainers = amResp + .getCompletedContainersStatuses(); + LOG.info("Got response from RM for container ask, completedCnt=" + + completedContainers.size()); + for (ContainerStatus containerStatus : completedContainers) { + LOG.info("Got container status for containerID=" + + containerStatus.getContainerId() + ", state=" + + containerStatus.getState() + ", exitStatus=" + + containerStatus.getExitStatus() + ", diagnostics=" + + containerStatus.getDiagnostics()); + + // non complete containers should not be here + assert (containerStatus.getState() == ContainerState.COMPLETE); + + // increment counters for completed/failed containers + int exitStatus = containerStatus.getExitStatus(); + if (0 != exitStatus) { + // container failed + if (-100 != exitStatus) { + // shell script failed + // counts as completed + numCompletedContainers.incrementAndGet(); + numFailedContainers.incrementAndGet(); + } else { + // something else bad happened + // app job did not complete for some reason + // we should re-try as the container was lost for some reason + numAllocatedContainers.decrementAndGet(); + numRequestedContainers.decrementAndGet(); + // we do not need to release the container as it would be done + // by the RM/CM. + } + } else { + // nothing to do + // container completed successfully numCompletedContainers.incrementAndGet(); - numFailedContainers.incrementAndGet(); - } - else { - // something else bad happened - // app job did not complete for some reason - // we should re-try as the container was lost for some reason - numAllocatedContainers.decrementAndGet(); - numRequestedContainers.decrementAndGet(); - // we do not need to release the container as it would be done - // by the RM/CM. + LOG.info("Container completed successfully." + ", containerId=" + + containerStatus.getContainerId()); } } - else { - // nothing to do - // container completed successfully - numCompletedContainers.incrementAndGet(); - LOG.info("Container completed successfully." 
- + ", containerId=" + containerStatus.getContainerId()); + if (numCompletedContainers.get() == numTotalContainers) { + appDone = true; } - } - if (numCompletedContainers.get() == numTotalContainers) { - appDone = true; + LOG.info("Current application state: loop=" + loopCounter + + ", appDone=" + appDone + ", total=" + numTotalContainers + + ", requested=" + numRequestedContainers + ", completed=" + + numCompletedContainers + ", failed=" + numFailedContainers + + ", currentAllocated=" + numAllocatedContainers); + + // TODO + // Add a timeout handling layer + // for misbehaving shell commands } - LOG.info("Current application state: loop=" + loopCounter - + ", appDone=" + appDone - + ", total=" + numTotalContainers - + ", requested=" + numRequestedContainers - + ", completed=" + numCompletedContainers - + ", failed=" + numFailedContainers - + ", currentAllocated=" + numAllocatedContainers); - - // TODO - // Add a timeout handling layer - // for misbehaving shell commands - } - - // Join all launched threads - // needed for when we time out - // and we need to release containers - for (Thread launchThread : launchThreads) { - try { - launchThread.join(10000); - } catch (InterruptedException e) { - LOG.info("Exception thrown in thread join: " + e.getMessage()); - e.printStackTrace(); + // Join all launched threads + // needed for when we time out + // and we need to release containers + for (Thread launchThread : launchThreads) { + try { + launchThread.join(10000); + } catch (InterruptedException e) { + LOG.info("Exception thrown in thread join: " + e.getMessage()); + e.printStackTrace(); + } } - } - // When the application completes, it should send a finish application signal - // to the RM - LOG.info("Application completed. Signalling finish to RM"); + // When the application completes, it should send a finish application + // signal to the RM + LOG.info("Application completed. Signalling finish to RM"); - FinishApplicationMasterRequest finishReq = Records.newRecord(FinishApplicationMasterRequest.class); - finishReq.setAppAttemptId(appAttemptID); - boolean isSuccess = true; - if (numFailedContainers.get() == 0) { - finishReq.setFinishApplicationStatus(FinalApplicationStatus.SUCCEEDED); + FinalApplicationStatus appStatus; + String appMessage = null; + boolean isSuccess = true; + if (numFailedContainers.get() == 0) { + appStatus = FinalApplicationStatus.SUCCEEDED; + } else { + appStatus = FinalApplicationStatus.FAILED; + appMessage = "Diagnostics." + ", total=" + numTotalContainers + + ", completed=" + numCompletedContainers.get() + ", allocated=" + + numAllocatedContainers.get() + ", failed=" + + numFailedContainers.get(); + isSuccess = false; + } + resourceManager.unregisterApplicationMaster(appStatus, appMessage, null); + return isSuccess; + } finally { + resourceManager.stop(); } - else { - finishReq.setFinishApplicationStatus(FinalApplicationStatus.FAILED); - String diagnostics = "Diagnostics." - + ", total=" + numTotalContainers - + ", completed=" + numCompletedContainers.get() - + ", allocated=" + numAllocatedContainers.get() - + ", failed=" + numFailedContainers.get(); - finishReq.setDiagnostics(diagnostics); - isSuccess = false; - } - resourceManager.finishApplicationMaster(finishReq); - return isSuccess; } /** - * Thread to connect to the {@link ContainerManager} and - * launch the container that will execute the shell command. + * Thread to connect to the {@link ContainerManager} and launch the container + * that will execute the shell command. 
*/ private class LaunchContainerRunnable implements Runnable { - // Allocated container + // Allocated container Container container; // Handle to communicate with ContainerManager ContainerManager cm; @@ -612,15 +660,16 @@ public class ApplicationMaster { * Helper function to connect to CM */ private void connectToCM() { - LOG.debug("Connecting to ContainerManager for containerid=" + container.getId()); + LOG.debug("Connecting to ContainerManager for containerid=" + + container.getId()); String cmIpPortStr = container.getNodeId().getHost() + ":" + container.getNodeId().getPort(); InetSocketAddress cmAddress = NetUtils.createSocketAddr(cmIpPortStr); LOG.info("Connecting to ContainerManager at " + cmIpPortStr); - this.cm = ((ContainerManager) rpc.getProxy(ContainerManager.class, cmAddress, conf)); + this.cm = ((ContainerManager) rpc.getProxy(ContainerManager.class, + cmAddress, conf)); } - @Override /** * Connects to CM, sets up container launch context @@ -628,11 +677,13 @@ public class ApplicationMaster { * start request to the CM. */ public void run() { - // Connect to ContainerManager + // Connect to ContainerManager connectToCM(); - LOG.info("Setting up container launch container for containerid=" + container.getId()); - ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class); + LOG.info("Setting up container launch container for containerid=" + + container.getId()); + ContainerLaunchContext ctx = Records + .newRecord(ContainerLaunchContext.class); ctx.setContainerId(container.getId()); ctx.setResource(container.getResource()); @@ -642,28 +693,30 @@ public class ApplicationMaster { ctx.setUser(jobUserName); LOG.info("Setting user in ContainerLaunchContext to: " + jobUserName); - // Set the environment + // Set the environment ctx.setEnvironment(shellEnv); - // Set the local resources + // Set the local resources Map localResources = new HashMap(); - // The container for the eventual shell commands needs its own local resources too. - // In this scenario, if a shell script is specified, we need to have it copied - // and made available to the container. + // The container for the eventual shell commands needs its own local + // resources too. + // In this scenario, if a shell script is specified, we need to have it + // copied and made available to the container. if (!shellScriptPath.isEmpty()) { LocalResource shellRsrc = Records.newRecord(LocalResource.class); shellRsrc.setType(LocalResourceType.FILE); shellRsrc.setVisibility(LocalResourceVisibility.APPLICATION); try { - shellRsrc.setResource(ConverterUtils.getYarnUrlFromURI(new URI(shellScriptPath))); + shellRsrc.setResource(ConverterUtils.getYarnUrlFromURI(new URI( + shellScriptPath))); } catch (URISyntaxException e) { - LOG.error("Error when trying to use shell script path specified in env" - + ", path=" + shellScriptPath); + LOG.error("Error when trying to use shell script path specified" + + " in env, path=" + shellScriptPath); e.printStackTrace(); - // A failure scenario on bad input such as invalid shell script path - // We know we cannot continue launching the container + // A failure scenario on bad input such as invalid shell script path + // We know we cannot continue launching the container // so we should release it. 
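The launch path in LaunchContainerRunnable boils down to building a ContainerLaunchContext and handing it to the ContainerManager. A condensed illustration, assuming the surrounding class's cm, container, shellEnv and shellCommand fields and omitting the optional shell-script local resource, is:

    // Build the launch context for the allocated container.
    ContainerLaunchContext ctx = Records.newRecord(ContainerLaunchContext.class);
    ctx.setContainerId(container.getId());
    ctx.setResource(container.getResource());
    ctx.setEnvironment(shellEnv);

    // Run the shell command, redirecting output into the container's log dir.
    List<String> commands = new ArrayList<String>();
    commands.add(shellCommand
        + " 1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"
        + " 2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr");
    ctx.setCommands(commands);

    // Hand the context to the ContainerManager to start the container.
    StartContainerRequest startReq = Records.newRecord(StartContainerRequest.class);
    startReq.setContainerLaunchContext(ctx);
    cm.startContainer(startReq);   // may throw YarnRemoteException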
// TODO numCompletedContainers.incrementAndGet(); @@ -676,12 +729,12 @@ public class ApplicationMaster { } ctx.setLocalResources(localResources); - // Set the necessary command to execute on the allocated container + // Set the necessary command to execute on the allocated container Vector vargs = new Vector(5); - // Set executable command + // Set executable command vargs.add(shellCommand); - // Set shell script path + // Set shell script path if (!shellScriptPath.isEmpty()) { vargs.add(ExecShellStringPath); } @@ -689,11 +742,6 @@ public class ApplicationMaster { // Set args for the shell command if any vargs.add(shellArgs); // Add log redirect params - // TODO - // We should redirect the output to hdfs instead of local logs - // so as to be able to look at the final output after the containers - // have been released. - // Could use a path suffixed with /AppId/AppAttempId/ContainerId/std[out|err] vargs.add("1>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stdout"); vargs.add("2>" + ApplicationConstants.LOG_DIR_EXPANSION_VAR + "/stderr"); @@ -707,131 +755,78 @@ public class ApplicationMaster { commands.add(command.toString()); ctx.setCommands(commands); - StartContainerRequest startReq = Records.newRecord(StartContainerRequest.class); + StartContainerRequest startReq = Records + .newRecord(StartContainerRequest.class); startReq.setContainerLaunchContext(ctx); try { cm.startContainer(startReq); } catch (YarnRemoteException e) { - LOG.info("Start container failed for :" - + ", containerId=" + container.getId()); + LOG.info("Start container failed for :" + ", containerId=" + + container.getId()); e.printStackTrace(); - // TODO do we need to release this container? + // TODO do we need to release this container? } // Get container status? - // Left commented out as the shell scripts are short lived - // and we are relying on the status for completed containers from RM to detect status + // Left commented out as the shell scripts are short lived + // and we are relying on the status for completed containers + // from RM to detect status - // GetContainerStatusRequest statusReq = Records.newRecord(GetContainerStatusRequest.class); - // statusReq.setContainerId(container.getId()); - // GetContainerStatusResponse statusResp; - //try { - //statusResp = cm.getContainerStatus(statusReq); - // LOG.info("Container Status" - // + ", id=" + container.getId() - // + ", status=" +statusResp.getStatus()); - //} catch (YarnRemoteException e) { - //e.printStackTrace(); - //} + // GetContainerStatusRequest statusReq = + // Records.newRecord(GetContainerStatusRequest.class); + // statusReq.setContainerId(container.getId()); + // GetContainerStatusResponse statusResp; + // try { + // statusResp = cm.getContainerStatus(statusReq); + // LOG.info("Container Status" + // + ", id=" + container.getId() + // + ", status=" +statusResp.getStatus()); + // } catch (YarnRemoteException e) { + // e.printStackTrace(); + // } } } - /** - * Connect to the Resource Manager - * @return Handle to communicate with the RM - */ - private AMRMProtocol connectToRM() { - YarnConfiguration yarnConf = new YarnConfiguration(conf); - InetSocketAddress rmAddress = yarnConf.getSocketAddr( - YarnConfiguration.RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_ADDRESS, - YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT); - LOG.info("Connecting to ResourceManager at " + rmAddress); - return ((AMRMProtocol) rpc.getProxy(AMRMProtocol.class, rmAddress, conf)); - } - - /** - * Register the Application Master to the Resource Manager - * 
@return the registration response from the RM - * @throws YarnRemoteException - */ - private RegisterApplicationMasterResponse registerToRM() throws YarnRemoteException { - RegisterApplicationMasterRequest appMasterRequest = Records.newRecord(RegisterApplicationMasterRequest.class); - - // set the required info into the registration request: - // application attempt id, - // host on which the app master is running - // rpc port on which the app master accepts requests from the client - // tracking url for the app master - appMasterRequest.setApplicationAttemptId(appAttemptID); - appMasterRequest.setHost(appMasterHostname); - appMasterRequest.setRpcPort(appMasterRpcPort); - appMasterRequest.setTrackingUrl(appMasterTrackingUrl); - - return resourceManager.registerApplicationMaster(appMasterRequest); - } - /** * Setup the request that will be sent to the RM for the container ask. + * * @param numContainers Containers to ask for from RM * @return the setup ResourceRequest to be sent to RM */ - private ResourceRequest setupContainerAskForRM(int numContainers) { - ResourceRequest request = Records.newRecord(ResourceRequest.class); - - // setup requirements for hosts - // whether a particular rack/host is needed - // Refer to apis under org.apache.hadoop.net for more - // details on how to get figure out rack/host mapping. + private ContainerRequest setupContainerAskForRM(int numContainers) { + // setup requirements for hosts // using * as any host will do for the distributed shell app - request.setHostName("*"); - - // set no. of containers needed - request.setNumContainers(numContainers); - // set the priority for the request Priority pri = Records.newRecord(Priority.class); - // TODO - what is the range for priority? how to decide? + // TODO - what is the range for priority? how to decide? pri.setPriority(requestPriority); - request.setPriority(pri); // Set up resource type requirements // For now, only memory is supported so we set memory requirements Resource capability = Records.newRecord(Resource.class); capability.setMemory(containerMemory); - request.setCapability(capability); + ContainerRequest request = new ContainerRequest(capability, null, null, + pri, numContainers); + LOG.info("Requested container ask: " + request.toString()); return request; } /** * Ask RM to allocate given no. 
of containers to this Application Master + * * @param requestedContainers Containers to ask for from RM - * @return Response from RM to AM with allocated containers + * @return Response from RM to AM with allocated containers * @throws YarnRemoteException */ - private AMResponse sendContainerAskToRM(List requestedContainers) - throws YarnRemoteException { - AllocateRequest req = Records.newRecord(AllocateRequest.class); - req.setResponseId(rmRequestID.incrementAndGet()); - req.setApplicationAttemptId(appAttemptID); - req.addAllAsks(requestedContainers); - req.addAllReleases(releasedContainers); - req.setProgress((float)numCompletedContainers.get()/numTotalContainers); + private AMResponse sendContainerAskToRM() throws YarnRemoteException { + float progressIndicator = (float) numCompletedContainers.get() + / numTotalContainers; - LOG.info("Sending request to RM for containers" - + ", requestedSet=" + requestedContainers.size() - + ", releasedSet=" + releasedContainers.size() - + ", progress=" + req.getProgress()); + LOG.info("Sending request to RM for containers" + ", progress=" + + progressIndicator); - for (ResourceRequest rsrcReq : requestedContainers) { - LOG.info("Requested container ask: " + rsrcReq.toString()); - } - for (ContainerId id : releasedContainers) { - LOG.info("Released container, id=" + id.getId()); - } - - AllocateResponse resp = resourceManager.allocate(req); + AllocateResponse resp = resourceManager.allocate(progressIndicator); return resp.getAMResponse(); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java index d857558fbd5..26976a1f348 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java @@ -18,10 +18,7 @@ package org.apache.hadoop.yarn.applications.distributedshell; -import java.io.BufferedReader; import java.io.IOException; -import java.io.InputStream; -import java.io.InputStreamReader; import java.util.ArrayList; import java.util.HashMap; import java.util.List; @@ -212,7 +209,7 @@ public class Client extends YarnClientImpl { /** */ public Client() throws Exception { - this(new Configuration()); + this(new YarnConfiguration()); } /** diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java index 08381786928..e6d8ae95f70 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java +++ 
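The ApplicationMaster changes above replace the hand-assembled ResourceRequest/AllocateRequest and FinishApplicationMasterRequest plumbing with calls on the resourceManager client helper: the ask is expressed as a ContainerRequest, progress is reported as a single float to allocate(), and shutdown goes through unregisterApplicationMaster() followed by stop(). A minimal sketch of the new call sequence, using only the calls visible in this patch and assuming the AM's existing fields (requestPriority, containerMemory, numTotalContainers, numCompletedContainers, numFailedContainers, resourceManager) are in scope:

    // Illustrative fragment only; not a complete method. The loop, error
    // handling, and the step that hands the ask to the client are elsewhere
    // in the patch and are omitted here.
    Priority pri = Records.newRecord(Priority.class);
    pri.setPriority(requestPriority);
    Resource capability = Records.newRecord(Resource.class);
    capability.setMemory(containerMemory);
    ContainerRequest ask =
        new ContainerRequest(capability, null, null, pri, numTotalContainers);

    // Progress is now derived locally and passed straight to allocate().
    float progress = (float) numCompletedContainers.get() / numTotalContainers;
    AllocateResponse allocateResponse = resourceManager.allocate(progress);
    AMResponse amResponse = allocateResponse.getAMResponse();

    // Completion: unregister with a final status instead of building a
    // FinishApplicationMasterRequest, then stop the client.
    FinalApplicationStatus appStatus = numFailedContainers.get() == 0
        ? FinalApplicationStatus.SUCCEEDED : FinalApplicationStatus.FAILED;
    resourceManager.unregisterApplicationMaster(appStatus, null, null);
    resourceManager.stop();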
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/test/java/org/apache/hadoop/yarn/applications/distributedshell/TestDistributedShell.java @@ -120,6 +120,7 @@ public class TestDistributedShell { boolean exceptionThrown = false; try { boolean initSuccess = client.init(args); + Assert.assertTrue(initSuccess); } catch (IllegalArgumentException e) { exceptionThrown = true; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 6625a9cfe53..9e78b1187e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.security.RMDelegationTokenRenewer; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml index b7f4df73ff8..7b91597754e 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml @@ -45,7 +45,69 @@ + + + + ${basedir}/src/main/resources + + yarn-version-info.properties + + false + + + ${basedir}/src/main/resources + + yarn-version-info.properties + + true + + + + org.apache.rat + apache-rat-plugin + + + src/main/resources/webapps/mapreduce/.keep + src/main/resources/webapps/jobhistory/.keep + src/main/resources/webapps/yarn/.keep + src/main/resources/webapps/cluster/.keep + src/main/resources/webapps/test/.keep + src/main/resources/webapps/proxy/.keep + src/main/resources/webapps/node/.keep + src/main/resources/webapps/static/dt-1.9.4/css/jui-dt.css + src/main/resources/webapps/static/dt-1.9.4/css/demo_table.css + src/main/resources/webapps/static/jquery/themes-1.9.1/base/jquery-ui.css + + + + + org.apache.hadoop + hadoop-maven-plugins + + + version-info + + version-info + + + + ${basedir}/src/main + + java/**/*.java + proto/**/*.proto + + + + + + maven-jar-plugin @@ -109,20 +171,6 @@ exec - - generate-version - generate-sources - - scripts/saveVersion.sh - - ${project.version} - ${project.build.directory} - - - - exec - - diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/scripts/saveVersion.sh b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/scripts/saveVersion.sh deleted file mode 100755 index e644bbff50f..00000000000 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/scripts/saveVersion.sh +++ /dev/null @@ -1,62 +0,0 @@ -#!/bin/sh - -# Licensed to the Apache Software Foundation (ASF) under one or more -# contributor license agreements. 
See the NOTICE file distributed with -# this work for additional information regarding copyright ownership. -# The ASF licenses this file to You under the Apache License, Version 2.0 -# (the "License"); you may not use this file except in compliance with -# the License. You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -# This file is used to generate the package-info.java class that -# records the version, revision, branch, user, timestamp, and url -unset LANG -unset LC_CTYPE -unset LC_TIME -version=$1 -build_dir=$2 -user=`whoami` -date=`date` -dir=`pwd` -cwd=`dirname $dir` -if git rev-parse HEAD 2>/dev/null > /dev/null ; then - revision=`git log -1 --pretty=format:"%H" ../` - hostname=`hostname` - branch=`git branch | sed -n -e 's/^* //p'` - url="git://${hostname}${cwd}" -elif [ -d .svn ]; then - revision=`svn info ../ | sed -n -e 's/Last Changed Rev: \(.*\)/\1/p'` - url=`svn info ../ | sed -n -e 's/^URL: \(.*\)/\1/p'` - # Get canonical branch (branches/X, tags/X, or trunk) - branch=`echo $url | sed -n -e 's,.*\(branches/.*\)$,\1,p' \ - -e 's,.*\(tags/.*\)$,\1,p' \ - -e 's,.*trunk$,trunk,p'` -else - revision="Unknown" - branch="Unknown" - url="file://$cwd" -fi -srcChecksum=`find ../ -name '*.java' | grep -v generated-sources | LC_ALL=C sort | xargs md5sum | md5sum | cut -d ' ' -f 1` - -mkdir -p $build_dir/generated-sources/version/org/apache/hadoop/yarn/ -cat << EOF | \ - sed -e "s/VERSION/$version/" -e "s/USER/$user/" -e "s/DATE/$date/" \ - -e "s|URL|$url|" -e "s/REV/$revision/" \ - -e "s|BRANCH|$branch|" -e "s/SRCCHECKSUM/$srcChecksum/" \ - > $build_dir/generated-sources/version/org/apache/hadoop/yarn/package-info.java -/* - * Generated by saveVersion.sh - */ -@YarnVersionAnnotation(version="VERSION", revision="REV", branch="BRANCH", - user="USER", date="DATE", url="URL", - srcChecksum="SRCCHECKSUM") -package org.apache.hadoop.yarn; -EOF diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java index 6302725250e..25212b83002 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/client/ClientRMProtocolPBClientImpl.java @@ -25,6 +25,9 @@ import java.net.InetSocketAddress; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.ClientRMProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest; @@ -77,17 +80,14 @@ import 
org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationReque import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; import org.apache.hadoop.yarn.exceptions.impl.pb.YarnRemoteExceptionPBImpl; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.CancelDelegationTokenRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RenewDelegationTokenRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; import com.google.protobuf.ServiceException; @@ -101,9 +101,7 @@ public class ClientRMProtocolPBClientImpl implements ClientRMProtocol, InetSocketAddress addr, Configuration conf) throws IOException { RPC.setProtocolEngine(conf, ClientRMProtocolPB.class, ProtobufRpcEngine.class); - proxy = - (ClientRMProtocolPB) RPC.getProxy(ClientRMProtocolPB.class, - clientVersion, addr, conf); + proxy = RPC.getProxy(ClientRMProtocolPB.class, clientVersion, addr, conf); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java index 9eecdad68a6..1c2d5b05ebf 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/impl/pb/service/ClientRMProtocolPBServiceImpl.java @@ -18,6 +18,12 @@ package org.apache.hadoop.yarn.api.impl.pb.service; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.CancelDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.GetDelegationTokenResponseProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenRequestProto; +import org.apache.hadoop.security.proto.SecurityProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.yarn.api.ClientRMProtocol; import org.apache.hadoop.yarn.api.ClientRMProtocolPB; import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenResponse; @@ -57,8 +63,6 @@ import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.RenewDelegationTokenRe import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationRequestPBImpl; import org.apache.hadoop.yarn.api.protocolrecords.impl.pb.SubmitApplicationResponsePBImpl; import 
org.apache.hadoop.yarn.exceptions.YarnRemoteException; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.CancelDelegationTokenRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.CancelDelegationTokenResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetAllApplicationsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetApplicationReportRequestProto; @@ -67,8 +71,6 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsRequestPr import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterMetricsResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetClusterNodesResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetDelegationTokenResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetNewApplicationResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueInfoRequestProto; @@ -77,8 +79,6 @@ import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoReques import org.apache.hadoop.yarn.proto.YarnServiceProtos.GetQueueUserAclsInfoResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.KillApplicationResponseProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RenewDelegationTokenRequestProto; -import org.apache.hadoop.yarn.proto.YarnServiceProtos.RenewDelegationTokenResponseProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationRequestProto; import org.apache.hadoop.yarn.proto.YarnServiceProtos.SubmitApplicationResponseProto; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java index 832bc0737ce..81c1fe933c2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java @@ -236,6 +236,10 @@ public class YarnConfiguration extends Configuration { /** The class to use as the persistent store.*/ public static final String RM_STORE = RM_PREFIX + "store.class"; + /** URI for FileSystemRMStateStore */ + public static final String FS_RM_STATE_STORE_URI = + RM_PREFIX + "fs.rm-state-store.uri"; + /** The maximum number of completed applications RM keeps. 
*/ public static final String RM_MAX_COMPLETED_APPLICATIONS = RM_PREFIX + "max-completed-applications"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java index 04c192de941..796f71cb074 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/BaseClientToAMTokenSecretManager.java @@ -21,24 +21,25 @@ package org.apache.hadoop.yarn.security.client; import javax.crypto.SecretKey; import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; public abstract class BaseClientToAMTokenSecretManager extends SecretManager { - public abstract SecretKey getMasterKey(ApplicationId applicationId); + public abstract SecretKey getMasterKey( + ApplicationAttemptId applicationAttemptId); @Override public synchronized byte[] createPassword( ClientTokenIdentifier identifier) { return createPassword(identifier.getBytes(), - getMasterKey(identifier.getApplicationID())); + getMasterKey(identifier.getApplicationAttemptID())); } @Override public byte[] retrievePassword(ClientTokenIdentifier identifier) throws SecretManager.InvalidToken { - SecretKey masterKey = getMasterKey(identifier.getApplicationID()); + SecretKey masterKey = getMasterKey(identifier.getApplicationAttemptID()); if (masterKey == null) { throw new SecretManager.InvalidToken("Illegal client-token!"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java index 43aeb392b41..60dc6ebdae6 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientToAMTokenSecretManager.java @@ -21,7 +21,7 @@ package org.apache.hadoop.yarn.security.client; import javax.crypto.SecretKey; import org.apache.hadoop.security.token.SecretManager; -import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; public class ClientToAMTokenSecretManager extends BaseClientToAMTokenSecretManager { @@ -29,14 +29,14 @@ public class ClientToAMTokenSecretManager extends // Only one client-token and one master-key for AM private final SecretKey masterKey; - public ClientToAMTokenSecretManager(ApplicationId applicationID, - byte[] secretKeyBytes) { + public ClientToAMTokenSecretManager( + ApplicationAttemptId applicationAttemptID, byte[] secretKeyBytes) { super(); this.masterKey = SecretManager.createSecretKey(secretKeyBytes); } @Override - public SecretKey getMasterKey(ApplicationId applicationID) { + public SecretKey getMasterKey(ApplicationAttemptId applicationAttemptID) { // Only one client-token and one master-key for AM, just return that. 
return this.masterKey; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java index dbd3a1fe8eb..07e694fcb55 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/ClientTokenIdentifier.java @@ -27,14 +27,14 @@ import org.apache.hadoop.io.Text; import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.token.Token; import org.apache.hadoop.security.token.TokenIdentifier; -import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.util.BuilderUtils; public class ClientTokenIdentifier extends TokenIdentifier { public static final Text KIND_NAME = new Text("YARN_CLIENT_TOKEN"); - private ApplicationId applicationId; + private ApplicationAttemptId applicationAttemptId; // TODO: Add more information in the tokenID such that it is not // transferrable, more secure etc. @@ -42,25 +42,29 @@ public class ClientTokenIdentifier extends TokenIdentifier { public ClientTokenIdentifier() { } - public ClientTokenIdentifier(ApplicationId id) { + public ClientTokenIdentifier(ApplicationAttemptId id) { this(); - this.applicationId = id; + this.applicationAttemptId = id; } - public ApplicationId getApplicationID() { - return this.applicationId; + public ApplicationAttemptId getApplicationAttemptID() { + return this.applicationAttemptId; } @Override public void write(DataOutput out) throws IOException { - out.writeLong(this.applicationId.getClusterTimestamp()); - out.writeInt(this.applicationId.getId()); + out.writeLong(this.applicationAttemptId.getApplicationId() + .getClusterTimestamp()); + out.writeInt(this.applicationAttemptId.getApplicationId().getId()); + out.writeInt(this.applicationAttemptId.getAttemptId()); } @Override public void readFields(DataInput in) throws IOException { - this.applicationId = - BuilderUtils.newApplicationId(in.readLong(), in.readInt()); + this.applicationAttemptId = + BuilderUtils.newApplicationAttemptId( + BuilderUtils.newApplicationId(in.readLong(), in.readInt()), + in.readInt()); } @Override @@ -70,10 +74,10 @@ public class ClientTokenIdentifier extends TokenIdentifier { @Override public UserGroupInformation getUser() { - if (this.applicationId == null) { + if (this.applicationAttemptId == null) { return null; } - return UserGroupInformation.createRemoteUser(this.applicationId.toString()); + return UserGroupInformation.createRemoteUser(this.applicationAttemptId.toString()); } @InterfaceAudience.Private diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java index 6faba7173e4..12d11f8df86 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/BuilderUtils.java @@ -25,10 +25,6 @@ import java.util.Comparator; import java.util.List; import java.util.Map; -import 
org.apache.hadoop.classification.InterfaceAudience.Private; -import org.apache.hadoop.classification.InterfaceAudience.Public; -import org.apache.hadoop.classification.InterfaceStability.Stable; -import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.net.NetUtils; import org.apache.hadoop.security.SecurityUtil; import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest; @@ -37,12 +33,14 @@ import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerLaunchContext; import org.apache.hadoop.yarn.api.records.ContainerState; import org.apache.hadoop.yarn.api.records.ContainerStatus; import org.apache.hadoop.yarn.api.records.ContainerToken; +import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.LocalResource; import org.apache.hadoop.yarn.api.records.LocalResourceType; @@ -52,9 +50,9 @@ import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeReport; import org.apache.hadoop.yarn.api.records.NodeState; import org.apache.hadoop.yarn.api.records.Priority; -import org.apache.hadoop.yarn.api.records.DelegationToken; import org.apache.hadoop.yarn.api.records.Resource; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.Token; import org.apache.hadoop.yarn.api.records.URL; import org.apache.hadoop.yarn.api.records.YarnApplicationState; import org.apache.hadoop.yarn.factories.RecordFactory; @@ -256,30 +254,36 @@ public class BuilderUtils { return container; } - public static DelegationToken newDelegationToken( - byte[] identifier, String kind, byte[] password, - String service) { - DelegationToken delegationToken = recordFactory.newRecordInstance( - DelegationToken.class); - delegationToken.setIdentifier(ByteBuffer.wrap(identifier)); - delegationToken.setKind(kind); - delegationToken.setPassword(ByteBuffer.wrap(password)); - delegationToken.setService(service); - return delegationToken; + public static T newToken(Class tokenClass, + byte[] identifier, String kind, byte[] password, String service) { + T token = recordFactory.newRecordInstance(tokenClass); + token.setIdentifier(ByteBuffer.wrap(identifier)); + token.setKind(kind); + token.setPassword(ByteBuffer.wrap(password)); + token.setService(service); + return token; } - + + public static DelegationToken newDelegationToken(byte[] identifier, + String kind, byte[] password, String service) { + return newToken(DelegationToken.class, identifier, kind, password, service); + } + + public static ClientToken newClientToken(byte[] identifier, String kind, + byte[] password, String service) { + return newToken(ClientToken.class, identifier, kind, password, service); + } + public static ContainerToken newContainerToken(NodeId nodeId, - ByteBuffer password, ContainerTokenIdentifier tokenIdentifier) { - ContainerToken containerToken = recordFactory - .newRecordInstance(ContainerToken.class); - containerToken.setIdentifier(ByteBuffer.wrap(tokenIdentifier.getBytes())); - 
containerToken.setKind(ContainerTokenIdentifier.KIND.toString()); - containerToken.setPassword(password); + byte[] password, ContainerTokenIdentifier tokenIdentifier) { // RPC layer client expects ip:port as service for tokens - InetSocketAddress addr = NetUtils.createSocketAddrForHost(nodeId.getHost(), - nodeId.getPort()); - // NOTE: use SecurityUtil.setTokenService if this becomes a "real" token - containerToken.setService(SecurityUtil.buildTokenService(addr).toString()); + InetSocketAddress addr = + NetUtils.createSocketAddrForHost(nodeId.getHost(), nodeId.getPort()); + // NOTE: use SecurityUtil.setTokenService if this becomes a "real" token + ContainerToken containerToken = + newToken(ContainerToken.class, tokenIdentifier.getBytes(), + ContainerTokenIdentifier.KIND.toString(), password, SecurityUtil + .buildTokenService(addr).toString()); return containerToken; } @@ -333,7 +337,7 @@ public class BuilderUtils { public static ApplicationReport newApplicationReport( ApplicationId applicationId, ApplicationAttemptId applicationAttemptId, String user, String queue, String name, String host, int rpcPort, - String clientToken, YarnApplicationState state, String diagnostics, + ClientToken clientToken, YarnApplicationState state, String diagnostics, String url, long startTime, long finishTime, FinalApplicationStatus finalStatus, ApplicationResourceUsageReport appResources, String origTrackingUrl) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java index 2aa67ebf24b..2ab2dfa41d2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/util/YarnVersionInfo.java @@ -20,7 +20,7 @@ package org.apache.hadoop.yarn.util; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.yarn.YarnVersionAnnotation; +import org.apache.hadoop.util.VersionInfo; import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; @@ -30,31 +30,20 @@ import org.apache.hadoop.classification.InterfaceStability; */ @InterfaceAudience.Private @InterfaceStability.Unstable -public class YarnVersionInfo { +public class YarnVersionInfo extends VersionInfo { private static final Log LOG = LogFactory.getLog(YarnVersionInfo.class); - private static Package myPackage; - private static YarnVersionAnnotation version; - - static { - myPackage = YarnVersionAnnotation.class.getPackage(); - version = myPackage.getAnnotation(YarnVersionAnnotation.class); - } + private static YarnVersionInfo YARN_VERSION_INFO = new YarnVersionInfo(); - /** - * Get the meta-data for the Yarn package. - * @return - */ - static Package getPackage() { - return myPackage; + protected YarnVersionInfo() { + super("yarn"); } - /** * Get the Yarn version. * @return the Yarn version string, eg. "0.6.3-dev" */ public static String getVersion() { - return version != null ? version.version() : "Unknown"; + return YARN_VERSION_INFO._getVersion(); } /** @@ -62,7 +51,7 @@ public class YarnVersionInfo { * @return the revision number, eg. "451451" */ public static String getRevision() { - return version != null ? 
version.revision() : "Unknown"; + return YARN_VERSION_INFO._getRevision(); } /** @@ -70,7 +59,7 @@ public class YarnVersionInfo { * @return The branch name, e.g. "trunk" or "branches/branch-0.20" */ public static String getBranch() { - return version != null ? version.branch() : "Unknown"; + return YARN_VERSION_INFO._getBranch(); } /** @@ -78,7 +67,7 @@ public class YarnVersionInfo { * @return the compilation date in unix date format */ public static String getDate() { - return version != null ? version.date() : "Unknown"; + return YARN_VERSION_INFO._getDate(); } /** @@ -86,14 +75,14 @@ public class YarnVersionInfo { * @return the username of the user */ public static String getUser() { - return version != null ? version.user() : "Unknown"; + return YARN_VERSION_INFO._getUser(); } /** * Get the subversion URL for the root Yarn directory. */ public static String getUrl() { - return version != null ? version.url() : "Unknown"; + return YARN_VERSION_INFO._getUrl(); } /** @@ -101,7 +90,7 @@ public class YarnVersionInfo { * built. **/ public static String getSrcChecksum() { - return version != null ? version.srcChecksum() : "Unknown"; + return YARN_VERSION_INFO._getSrcChecksum(); } /** @@ -109,14 +98,11 @@ public class YarnVersionInfo { * revision, user and date. */ public static String getBuildVersion(){ - return YarnVersionInfo.getVersion() + - " from " + YarnVersionInfo.getRevision() + - " by " + YarnVersionInfo.getUser() + - " source checksum " + YarnVersionInfo.getSrcChecksum(); + return YARN_VERSION_INFO._getBuildVersion(); } public static void main(String[] args) { - LOG.debug("version: "+ version); + LOG.debug("version: "+ getVersion()); System.out.println("Yarn " + getVersion()); System.out.println("Subversion " + getUrl() + " -r " + getRevision()); System.out.println("Compiled by " + getUser() + " on " + getDate()); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index c4c6eef295b..babc2fbf8e4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
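With the YarnVersionInfo rewrite above, the class extends the common VersionInfo and reads its build metadata from the yarn-version-info.properties resource produced by the hadoop-maven-plugins version-info execution added to the hadoop-yarn-common pom, rather than from the annotation generated by the now-deleted saveVersion.sh. The public static accessors keep their old signatures, so existing callers are unaffected; a minimal usage sketch (every accessor shown appears in the patched class):

    // Usage sketch only.
    System.out.println("Yarn " + YarnVersionInfo.getVersion()
        + " from " + YarnVersionInfo.getRevision()
        + " by " + YarnVersionInfo.getUser()
        + " on " + YarnVersionInfo.getDate());
    System.out.println("Source checksum " + YarnVersionInfo.getSrcChecksum());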
+# org.apache.hadoop.yarn.security.client.ClientRMSecurityInfo org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo org.apache.hadoop.yarn.security.SchedulerSecurityInfo diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index fc669de1572..233404037e5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.security.ContainerTokenIdentifier org.apache.hadoop.yarn.security.ApplicationTokenIdentifier org.apache.hadoop.yarn.security.client.ClientTokenIdentifier diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index c19ebc31e5e..3380cb8b019 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1,2 +1,15 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.yarn.security.ApplicationTokenIdentifier$Renewer -org.apache.hadoop.yarn.security.ContainerTokenIdentifier$Renewer \ No newline at end of file +org.apache.hadoop.yarn.security.ContainerTokenIdentifier$Renewer diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css index bee7b0d9936..b60ee7de6ae 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/dt-1.9.4/css/demo_page.css @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ /* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * General page setup @@ -90,4 +107,4 @@ .css_left { float: left; -} \ No newline at end of file +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css index 979920b642e..9455a59cd1f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.css @@ -1,3 +1,20 @@ +/* +* Licensed to the Apache Software Foundation (ASF) under one or more +* contributor license agreements. See the NOTICE file distributed with +* this work for additional information regarding copyright ownership. +* The ASF licenses this file to You under the Apache License, Version 2.0 +* (the "License"); you may not use this file except in compliance with +* the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ + /* Styles for YARN */ * { margin: 0; border: 0 } html, body { height: 100% } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js index 5d78aaaf7fb..3f42c7cc2f5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/webapps/static/yarn.dt.plugins.js @@ -1,3 +1,20 @@ + +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + + if (!jQuery.fn.dataTableExt.fnVersionCheck("1.7.5")) { alert("These plugins requires dataTables 1.7.5+"); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml index ab60868380e..e28ac43e855 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml @@ -230,6 +230,17 @@ The class to use as the persistent store. yarn.resourcemanager.store.class + org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore + + + + URI pointing to the location of the FileSystem path where + RM state will be stored. This must be supplied when using + org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore + as the value for yarn.resourcemanager.store.class + yarn.resourcemanager.fs.rm-state-store.uri + ${hadoop.tmp.dir}/yarn/system/rmstore + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties new file mode 100644 index 00000000000..9a8575c6dea --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-version-info.properties @@ -0,0 +1,25 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
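The yarn-default.xml and YarnConfiguration additions above introduce yarn.resourcemanager.fs.rm-state-store.uri, which must point at a FileSystem location whenever FileSystemRMStateStore is configured as yarn.resourcemanager.store.class (the shipped default is ${hadoop.tmp.dir}/yarn/system/rmstore). A short sketch of setting both keys programmatically with the constants visible in the YarnConfiguration hunk; the HDFS URI below is only a placeholder, not a value from the patch:

    // Sketch only; RM_STORE and FS_RM_STATE_STORE_URI are the keys shown in
    // this patch. The URI value is a hypothetical example.
    YarnConfiguration conf = new YarnConfiguration();
    conf.set(YarnConfiguration.RM_STORE,
        "org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore");
    conf.set(YarnConfiguration.FS_RM_STATE_STORE_URI,
        "hdfs://namenode:8020/yarn/system/rmstore");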
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +version=${pom.version} +revision=${version-info.scm.commit} +branch=${version-info.scm.branch} +user=${user.name} +date=${version-info.build.time} +url=${version-info.scm.uri} +srcChecksum=${version-info.source.md5} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java index ade32b44c00..571d5667e34 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/java/org/apache/hadoop/yarn/server/security/BaseContainerTokenSecretManager.java @@ -199,7 +199,6 @@ public class BaseContainerTokenSecretManager extends this.readLock.unlock(); } - return BuilderUtils.newContainerToken(nodeId, ByteBuffer.wrap(password), - tokenIdentifier); + return BuilderUtils.newContainerToken(nodeId, password, tokenIdentifier); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 2e92fa85737..5a2a0095e52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.yarn.server.RMNMSecurityInfoClass diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml index 7a9f12cfed8..19e906cb921 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml @@ -67,6 +67,9 @@ + + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake index 1fff36131f6..4b59531d877 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/config.h.cmake @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ #ifndef CONFIG_H #define CONFIG_H diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo index 3cfe0f7fcef..c00c943ddd7 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.SecurityInfo @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerSecurityInfo diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier index 6ed6e3261e2..539028d5b0a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenIdentifier @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer.security.LocalizerTokenIdentifier diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties index 7c859535c88..bc120f14a52 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/resources/container-log4j.properties @@ -1,3 +1,16 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. See accompanying LICENSE file. 
+# # Define some default values that can be overridden by system properties hadoop.root.logger=DEBUG,CLA diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockApp.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockApp.java deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockContainer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/MockContainer.java deleted file mode 100644 index e69de29bb2d..00000000000 diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java index c5b48819641..f42261765fb 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java @@ -138,7 +138,14 @@ public class TestNodeManagerShutdown { containerManager.getContainerStatus(request).getStatus(); Assert.assertEquals(ContainerState.RUNNING, containerStatus.getState()); - try {Thread.sleep(5000);} catch (InterruptedException ex) {ex.printStackTrace();} + final int MAX_TRIES=20; + int numTries = 0; + while (!processStartFile.exists() && numTries < MAX_TRIES) { + try { + Thread.sleep(500); + } catch (InterruptedException ex) {ex.printStackTrace();} + numTries++; + } nm.stop(); @@ -202,7 +209,7 @@ public class TestNodeManagerShutdown { fileWriter.write("trap \"echo $hello >> " + processStartFile + "\" SIGTERM\n"); fileWriter.write("echo \"Writing pid to start file\"\n"); fileWriter.write("echo $$ >> " + processStartFile + "\n"); - fileWriter.write("while true; do\nsleep 1s;\ndone\n"); + fileWriter.write("while true; do\ndate >> /dev/null;\n done\n"); fileWriter.close(); return scriptFile; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java index a97294533cb..0e0a47200a5 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/localizer/TestLocalResourcesTrackerImpl.java @@ -1,3 +1,21 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. 
See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + package org.apache.hadoop.yarn.server.nodemanager.containermanager.localizer; import static org.mockito.Mockito.any; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error index 4f3432cbb80..0a9ef9fff31 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executer-with-error @@ -1,4 +1,16 @@ #!/bin/sh +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + for PARAM in "$@" do echo $PARAM; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor index d71bd6cec86..0b5986a6a12 100755 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/resources/mock-container-executor @@ -1,4 +1,16 @@ #!/bin/sh +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
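Editor note on the TestNodeManagerShutdown hunk above: the fixed Thread.sleep(5000) is replaced with a bounded poll (up to 20 tries of 500 ms each) for processStartFile, which keeps the test fast on quick machines and less flaky on slow ones; the long-running container script also loops on `date >> /dev/null` instead of `sleep 1s`, likely so the trapped SIGTERM is acted on without waiting for a sleep to return. A minimal sketch of the same polling pattern as a reusable helper; the class and method names are illustrative, not part of the patch.

import java.io.File;
import java.util.concurrent.TimeUnit;

// Illustrative helper: wait for a file to appear by polling with a bound,
// instead of sleeping for a fixed worst-case interval.
public final class WaitUtil {
  private WaitUtil() {}

  /** Returns true if the file appears within maxTries * intervalMs ms. */
  public static boolean waitForFile(File file, int maxTries, long intervalMs)
      throws InterruptedException {
    for (int tries = 0; tries < maxTries; tries++) {
      if (file.exists()) {
        return true;
      }
      TimeUnit.MILLISECONDS.sleep(intervalMs);
    }
    return file.exists();
  }
}

In the test, the added loop would correspond to waitForFile(processStartFile, 20, 500) before nm.stop() is called.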
+ for PARAM in "$@" do echo $PARAM; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml index 39682bdb7a6..0e4fe6b02e9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/conf/capacity-scheduler.xml @@ -1,3 +1,16 @@ + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml index e015f01634e..06f58a88e9b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml @@ -41,6 +41,12 @@ org.apache.hadoop hadoop-yarn-server-web-proxy + + org.apache.hadoop + hadoop-hdfs + test-jar + test + diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java index 8b5e55aa92b..52b4d2892a4 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMAppManager.java @@ -28,19 +28,17 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.io.DataInputByteBuffer; import org.apache.hadoop.security.Credentials; import org.apache.hadoop.security.UserGroupInformation; -import org.apache.hadoop.security.token.Token; import org.apache.hadoop.util.StringUtils; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.ipc.RPCUtil; -import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.RMAuditLogger.AuditConstants; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore; -import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; @@ -236,21 +234,6 @@ public class RMAppManager implements EventHandler, RMApp application = null; try { - String clientTokenStr = null; - if (UserGroupInformation.isSecurityEnabled()) { - - // TODO: This needs to move to per-AppAttempt - this.rmContext.getClientToAMTokenSecretManager().registerApplication( - applicationId); - - Token 
clientToken = new - Token( - new ClientTokenIdentifier(applicationId), - this.rmContext.getClientToAMTokenSecretManager()); - clientTokenStr = clientToken.encodeToUrlString(); - LOG.debug("Sending client token as " + clientTokenStr); - } - // Sanity checks if (submissionContext.getQueue() == null) { submissionContext.setQueue(YarnConfiguration.DEFAULT_QUEUE_NAME); @@ -265,8 +248,8 @@ public class RMAppManager implements EventHandler, new RMAppImpl(applicationId, rmContext, this.conf, submissionContext.getApplicationName(), submissionContext.getUser(), submissionContext.getQueue(), - submissionContext, clientTokenStr, this.scheduler, - this.masterService, submitTime); + submissionContext, this.scheduler, this.masterService, + submitTime); // Sanity check - duplicate? if (rmContext.getRMApps().putIfAbsent(applicationId, application) != diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java index 0bec25f1cea..de682f64f9d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java @@ -237,7 +237,7 @@ public class AMLauncher implements Runnable { SecretKey clientSecretKey = this.rmContext.getClientToAMTokenSecretManager().getMasterKey( - applicationId); + application.getAppAttemptId()); String encoded = Base64.encodeBase64URLSafeString(clientSecretKey.getEncoded()); environment.put( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java new file mode 100644 index 00000000000..aca84adf0dd --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/FileSystemRMStateStore.java @@ -0,0 +1,233 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.yarn.server.resourcemanager.recovery; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.classification.InterfaceAudience.Private; +import org.apache.hadoop.classification.InterfaceStability.Unstable; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FSDataInputStream; +import org.apache.hadoop.fs.FSDataOutputStream; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptStateDataPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationStateDataPBImpl; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptStateDataProto; +import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationStateDataProto; +import org.apache.hadoop.yarn.util.ConverterUtils; + +import com.google.common.annotations.VisibleForTesting; + +@Private +@Unstable +/** + * A simple class for storing RM state in any storage that implements a basic + * FileSystem interface. Does not use directories so that simple key-value + * stores can be used. The retry policy for the real filesystem client must be + * configured separately to enable retry of filesystem operations when needed. + */ +public class FileSystemRMStateStore extends RMStateStore { + + public static final Log LOG = LogFactory.getLog(FileSystemRMStateStore.class); + + private static final String ROOT_DIR_NAME = "FSRMStateRoot"; + + + private FileSystem fs; + + private Path fsRootDirPath; + + @VisibleForTesting + Path fsWorkingPath; + + public synchronized void initInternal(Configuration conf) + throws Exception{ + + fsWorkingPath = new Path(conf.get(YarnConfiguration.FS_RM_STATE_STORE_URI)); + fsRootDirPath = new Path(fsWorkingPath, ROOT_DIR_NAME); + + // create filesystem + fs = fsWorkingPath.getFileSystem(conf); + fs.mkdirs(fsRootDirPath); + } + + @Override + protected synchronized void closeInternal() throws Exception { + fs.close(); + } + + @Override + public synchronized RMState loadState() throws Exception { + try { + RMState state = new RMState(); + FileStatus[] childNodes = fs.listStatus(fsRootDirPath); + List attempts = + new ArrayList(); + for(FileStatus childNodeStatus : childNodes) { + assert childNodeStatus.isFile(); + String childNodeName = childNodeStatus.getPath().getName(); + Path childNodePath = getNodePath(childNodeName); + byte[] childData = readFile(childNodePath, childNodeStatus.getLen()); + if(childNodeName.startsWith(ApplicationId.appIdStrPrefix)){ + // application + LOG.info("Loading application from node: " + childNodeName); + ApplicationId appId = ConverterUtils.toApplicationId(childNodeName); + ApplicationStateDataPBImpl appStateData = + new ApplicationStateDataPBImpl( + ApplicationStateDataProto.parseFrom(childData)); + ApplicationState appState = new ApplicationState( + appStateData.getSubmitTime(), + appStateData.getApplicationSubmissionContext()); + // assert child node name is same as actual applicationId + assert appId.equals(appState.context.getApplicationId()); + state.appState.put(appId, appState); + } else if(childNodeName.startsWith( + ApplicationAttemptId.appAttemptIdStrPrefix)) { + // attempt + LOG.info("Loading application 
attempt from node: " + childNodeName); + ApplicationAttemptId attemptId = + ConverterUtils.toApplicationAttemptId(childNodeName); + ApplicationAttemptStateDataPBImpl attemptStateData = + new ApplicationAttemptStateDataPBImpl( + ApplicationAttemptStateDataProto.parseFrom(childData)); + ApplicationAttemptState attemptState = new ApplicationAttemptState( + attemptId, attemptStateData.getMasterContainer()); + // assert child node name is same as application attempt id + assert attemptId.equals(attemptState.getAttemptId()); + attempts.add(attemptState); + } else { + LOG.info("Unknown child node with name: " + childNodeName); + } + } + + // go through all attempts and add them to their apps + for(ApplicationAttemptState attemptState : attempts) { + ApplicationId appId = attemptState.getAttemptId().getApplicationId(); + ApplicationState appState = state.appState.get(appId); + if(appState != null) { + appState.attempts.put(attemptState.getAttemptId(), attemptState); + } else { + // the application node may have been removed when the application + // completed but the RM might have stopped before it could remove the + // application attempt nodes + LOG.info("Application node not found for attempt: " + + attemptState.getAttemptId()); + deleteFile(getNodePath(attemptState.getAttemptId().toString())); + } + } + + return state; + } catch (Exception e) { + LOG.error("Failed to load state.", e); + throw e; + } + } + + @Override + public synchronized void storeApplicationState(String appId, + ApplicationStateDataPBImpl appStateDataPB) + throws Exception { + Path nodeCreatePath = getNodePath(appId); + + LOG.info("Storing info for app: " + appId + " at: " + nodeCreatePath); + byte[] appStateData = appStateDataPB.getProto().toByteArray(); + try { + // currently throw all exceptions. May need to respond differently for HA + // based on whether we have lost the right to write to FS + writeFile(nodeCreatePath, appStateData); + } catch (Exception e) { + LOG.info("Error storing info for app: " + appId, e); + throw e; + } + } + + @Override + public synchronized void storeApplicationAttemptState(String attemptId, + ApplicationAttemptStateDataPBImpl attemptStateDataPB) + throws Exception { + Path nodeCreatePath = getNodePath(attemptId); + LOG.info("Storing info for attempt: " + attemptId + + " at: " + nodeCreatePath); + byte[] attemptStateData = attemptStateDataPB.getProto().toByteArray(); + try { + // currently throw all exceptions. 
May need to respond differently for HA + // based on whether we have lost the right to write to FS + writeFile(nodeCreatePath, attemptStateData); + } catch (Exception e) { + LOG.info("Error storing info for attempt: " + attemptId, e); + throw e; + } + } + + @Override + public synchronized void removeApplicationState(ApplicationState appState) + throws Exception { + String appId = appState.getAppId().toString(); + Path nodeRemovePath = getNodePath(appId); + LOG.info("Removing info for app: " + appId + " at: " + nodeRemovePath); + deleteFile(nodeRemovePath); + for(ApplicationAttemptId attemptId : appState.attempts.keySet()) { + removeApplicationAttemptState(attemptId.toString()); + } + } + + public synchronized void removeApplicationAttemptState(String attemptId) + throws Exception { + Path nodeRemovePath = getNodePath(attemptId); + LOG.info("Removing info for attempt: " + attemptId + + " at: " + nodeRemovePath); + deleteFile(nodeRemovePath); + } + + // FileSystem related code + + private void deleteFile(Path deletePath) throws Exception { + if(!fs.delete(deletePath, true)) { + throw new Exception("Failed to delete " + deletePath); + } + } + + private byte[] readFile(Path inputPath, long len) throws Exception { + FSDataInputStream fsIn = fs.open(inputPath); + // state data will not be that "long" + byte[] data = new byte[(int)len]; + fsIn.readFully(data); + return data; + } + + private void writeFile(Path outputPath, byte[] data) throws Exception { + FSDataOutputStream fsOut = fs.create(outputPath, false); + fsOut.write(data); + fsOut.flush(); + fsOut.close(); + } + + @VisibleForTesting + Path getNodePath(String nodeName) { + return new Path(fsRootDirPath, nodeName); + } +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java index 6b614606c27..9bbdc3af045 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/NullRMStateStore.java @@ -18,10 +18,12 @@ package org.apache.hadoop.yarn.server.resourcemanager.recovery; +import org.apache.hadoop.classification.InterfaceStability.Unstable; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptStateDataPBImpl; import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationStateDataPBImpl; +@Unstable public class NullRMStateStore extends RMStateStore { @Override @@ -36,7 +38,7 @@ public class NullRMStateStore extends RMStateStore { @Override public RMState loadState() throws Exception { - return null; + throw new UnsupportedOperationException("Cannot load state from null store"); } @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java index 17651fa0773..16dff0602a2 100644 --- 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/RMAppImpl.java @@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.NodeState; @@ -49,11 +50,11 @@ import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEvent; import org.apache.hadoop.yarn.server.resourcemanager.RMAppManagerEventType; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; -import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNodeUpdateEvent.RMAppNodeUpdateType; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; import org.apache.hadoop.yarn.server.resourcemanager.recovery.Recoverable; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppNodeUpdateEvent.RMAppNodeUpdateType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; @@ -82,7 +83,6 @@ public class RMAppImpl implements RMApp, Recoverable { private final String queue; private final String name; private final ApplicationSubmissionContext submissionContext; - private final String clientTokenStr; private final Dispatcher dispatcher; private final YarnScheduler scheduler; private final ApplicationMasterService masterService; @@ -213,9 +213,9 @@ public class RMAppImpl implements RMApp, Recoverable { public RMAppImpl(ApplicationId applicationId, RMContext rmContext, Configuration config, String name, String user, String queue, - ApplicationSubmissionContext submissionContext, String clientTokenStr, - YarnScheduler scheduler, ApplicationMasterService masterService, - long submitTime) { + ApplicationSubmissionContext submissionContext, + YarnScheduler scheduler, + ApplicationMasterService masterService, long submitTime) { this.applicationId = applicationId; this.name = name; @@ -226,7 +226,6 @@ public class RMAppImpl implements RMApp, Recoverable { this.user = user; this.queue = queue; this.submissionContext = submissionContext; - this.clientTokenStr = clientTokenStr; this.scheduler = scheduler; this.masterService = masterService; this.submitTime = submitTime; @@ -402,7 +401,7 @@ public class RMAppImpl implements RMApp, Recoverable { try { ApplicationAttemptId currentApplicationAttemptId = null; - String clientToken = UNAVAILABLE; + ClientToken clientToken = null; String trackingUrl = UNAVAILABLE; String host = UNAVAILABLE; String origTrackingUrl = UNAVAILABLE; @@ -541,9 +540,9 @@ public class RMAppImpl implements RMApp, Recoverable { 
appAttemptId.setApplicationId(applicationId); appAttemptId.setAttemptId(attempts.size() + 1); - RMAppAttempt attempt = new RMAppAttemptImpl(appAttemptId, - clientTokenStr, rmContext, scheduler, masterService, - submissionContext, conf); + RMAppAttempt attempt = + new RMAppAttemptImpl(appAttemptId, rmContext, scheduler, masterService, + submissionContext, conf); attempts.put(appAttemptId, attempt); currentAttempt = attempt; if(startAttempt) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java index 57d78e9816f..c43d9ecbb5b 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttempt.java @@ -23,10 +23,11 @@ import java.util.Set; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; @@ -91,7 +92,7 @@ public interface RMAppAttempt extends EventHandler { * The token required by the clients to talk to the application attempt * @return the token required by the clients to talk to the application attempt */ - String getClientToken(); + ClientToken getClientToken(); /** * Diagnostics information for the application attempt. 
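Editor note on the FileSystemRMStateStore introduced earlier in this patch: it keeps one flat file per application and per attempt under a FSRMStateRoot directory (named by the stringified ApplicationId / ApplicationAttemptId), and loadState() rebuilds the application map by re-attaching attempt records to their applications, deleting any orphaned attempt files it finds. Below is a minimal sketch of how such a store might be wired up and recovered from, assuming the init(Configuration), setDispatcher, loadState, and close calls that TestRMStateStore (later in this patch) exercises; the HDFS URI and class names are placeholders, not part of the patch.

import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.Dispatcher;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.FileSystemRMStateStore;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState;
import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState;

// Sketch only, not part of the patch: configure the file-system store and
// reload persisted application state, e.g. on ResourceManager restart.
public final class FsRMStateStoreSketch {

  // Thin subclass so init(Configuration) can be invoked directly, the same
  // trick TestRMStateStore uses with its TestFileSystemRMStore.
  static final class Store extends FileSystemRMStateStore {
    Store(Configuration conf) throws Exception {
      init(conf); // creates <store-uri>/FSRMStateRoot if it does not exist
    }
  }

  static RMState recover(Dispatcher dispatcher) throws Exception {
    YarnConfiguration conf = new YarnConfiguration();
    // Placeholder URI; any FileSystem implementation will do.
    conf.set(YarnConfiguration.FS_RM_STATE_STORE_URI,
        "hdfs://namenode:8020/yarn/system/rmstore");

    Store store = new Store(conf);
    store.setDispatcher(dispatcher); // receives store-completion events

    RMState state = store.loadState();
    Map<ApplicationId, ApplicationState> apps = state.getApplicationState();
    System.out.println("recovered " + apps.size() + " applications");

    store.close();
    return state;
  }
}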
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java index c09610414be..c8bd877efb2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java @@ -39,14 +39,17 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.http.HttpConfig; import org.apache.hadoop.util.ExitUtil; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.security.token.Token; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; -import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; -import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationResourceUsageReport; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.NodeId; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.Resource; @@ -55,6 +58,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.EventHandler; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier; import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.RMContext; import org.apache.hadoop.yarn.server.resourcemanager.amlauncher.AMLauncherEvent; @@ -119,7 +123,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { private final WriteLock writeLock; private final ApplicationAttemptId applicationAttemptId; - private final String clientToken; + private ClientToken clientToken; private final ApplicationSubmissionContext submissionContext; //nodes on while this attempt's containers ran @@ -347,11 +351,10 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { .installTopology(); public RMAppAttemptImpl(ApplicationAttemptId appAttemptId, - String clientToken, RMContext rmContext, YarnScheduler scheduler, + RMContext rmContext, YarnScheduler scheduler, ApplicationMasterService masterService, ApplicationSubmissionContext submissionContext, Configuration conf) { - this.conf = conf; this.applicationAttemptId = appAttemptId; this.rmContext = rmContext; @@ -359,7 +362,19 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { this.submissionContext = submissionContext; this.scheduler = scheduler; this.masterService = masterService; - this.clientToken = 
clientToken; + + if (UserGroupInformation.isSecurityEnabled()) { + + this.rmContext.getClientToAMTokenSecretManager().registerApplication( + appAttemptId); + + Token token = + new Token(new ClientTokenIdentifier( + appAttemptId), this.rmContext.getClientToAMTokenSecretManager()); + this.clientToken = + BuilderUtils.newClientToken(token.getIdentifier(), token.getKind() + .toString(), token.getPassword(), token.getService().toString()); + } ReentrantReadWriteLock lock = new ReentrantReadWriteLock(); this.readLock = lock.readLock(); @@ -477,7 +492,7 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { } @Override - public String getClientToken() { + public ClientToken getClientToken() { return this.clientToken; } @@ -963,6 +978,13 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable { appAttempt.rmContext.getAMFinishingMonitor().unregister( appAttempt.getAppAttemptId()); + + // Unregister from the ClientTokenSecretManager + if (UserGroupInformation.isSecurityEnabled()) { + appAttempt.rmContext.getClientToAMTokenSecretManager() + .unRegisterApplication(appAttempt.getAppAttemptId()); + } + if(!appAttempt.submissionContext.getUnmanagedAM()) { // Tell the launcher to cleanup. appAttempt.eventHandler.handle(new AMLauncherEvent( diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java index 51d65e3969f..cc9b872724d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/SchedulerApplication.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. 
+*/ package org.apache.hadoop.yarn.server.resourcemanager.scheduler; import java.util.Collection; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java index 376603e36fb..46a6fe9bb7a 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/AppSchedulable.java @@ -275,7 +275,7 @@ public class AppSchedulable extends Schedulable { // The desired container won't fit here, so reserve reserve(application, priority, node, container, reserved); - return Resources.none(); + return FairScheduler.CONTAINER_RESERVED; } } @@ -307,20 +307,27 @@ public class AppSchedulable extends Schedulable { // (not scheduled) in order to promote better locality. synchronized (app) { for (Priority priority : prioritiesToTry) { + if (app.getTotalRequiredResources(priority) <= 0) { + continue; + } + app.addSchedulingOpportunity(priority); + + ResourceRequest rackLocalRequest = app.getResourceRequest(priority, + node.getRackName()); + ResourceRequest localRequest = app.getResourceRequest(priority, + node.getHostName()); + NodeType allowedLocality = app.getAllowedLocalityLevel(priority, scheduler.getNumClusterNodes(), scheduler.getNodeLocalityThreshold(), scheduler.getRackLocalityThreshold()); - - ResourceRequest localRequest = app.getResourceRequest(priority, - node.getHostName()); - if (localRequest != null && localRequest.getNumContainers() != 0) { + + if (rackLocalRequest != null && rackLocalRequest.getNumContainers() != 0 + && localRequest != null && localRequest.getNumContainers() != 0) { return assignContainer(node, app, priority, localRequest, NodeType.NODE_LOCAL, reserved); } - ResourceRequest rackLocalRequest = app.getResourceRequest(priority, - node.getRackName()); if (rackLocalRequest != null && rackLocalRequest.getNumContainers() != 0 && (allowedLocality.equals(NodeType.RACK_LOCAL) || allowedLocality.equals(NodeType.OFF_SWITCH))) { diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java index e316bc669a3..ff51e9506a2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FSLeafQueue.java @@ -177,7 +177,10 @@ public class FSLeafQueue extends FSQueue { Collections.sort(appScheds, comparator); for (AppSchedulable sched: appScheds) { if (sched.getRunnable()) { - return sched.assignContainer(node, reserved); + Resource assignedResource = sched.assignContainer(node, reserved); + if 
(!assignedResource.equals(Resources.none())) { + return assignedResource; + } } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java index 3aceaff74b0..ab0f1a4f6f1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairScheduler.java @@ -56,6 +56,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMSta import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptRejectedEvent; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerEventType; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainerState; @@ -109,6 +110,10 @@ public class FairScheduler implements ResourceScheduler { private Clock clock; private static final Log LOG = LogFactory.getLog(FairScheduler.class); + + // Value that container assignment methods return when a container is + // reserved + public static final Resource CONTAINER_RESERVED = Resources.createResource(-1); // How often fair shares are re-calculated (ms) protected long UPDATE_INTERVAL = 500; @@ -498,8 +503,11 @@ public class FairScheduler implements ResourceScheduler { // Enforce ACLs UserGroupInformation userUgi = UserGroupInformation.createRemoteUser(user); if (!queue.hasAccess(QueueACL.SUBMIT_APPLICATIONS, userUgi)) { - LOG.info("User " + userUgi.getUserName() + - " cannot submit applications to queue " + queue.getName()); + String msg = "User " + userUgi.getUserName() + + " cannot submit applications to queue " + queue.getName(); + LOG.info(msg); + rmContext.getDispatcher().getEventHandler().handle( + new RMAppAttemptRejectedEvent(applicationAttemptId, msg)); return; } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java index f5706d93bfd..cf460996d8d 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/FairSchedulerConfiguration.java @@ -1,3 +1,20 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license 
agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ package org.apache.hadoop.yarn.server.resourcemanager.scheduler.fair; import java.io.File; @@ -39,7 +56,7 @@ public class FairSchedulerConfiguration extends Configuration { /** Whether to assign multiple containers in one check-in. */ protected static final String ASSIGN_MULTIPLE = CONF_PREFIX + "assignmultiple"; - protected static final boolean DEFAULT_ASSIGN_MULTIPLE = true; + protected static final boolean DEFAULT_ASSIGN_MULTIPLE = false; /** Whether to give more weight to apps requiring many resources. */ protected static final String SIZE_BASED_WEIGHT = CONF_PREFIX + "sizebasedweight"; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java index 9976da53211..23c21e72f05 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/security/ClientToAMTokenSecretManagerInRM.java @@ -23,26 +23,29 @@ import java.util.Map; import javax.crypto.SecretKey; -import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.security.client.BaseClientToAMTokenSecretManager; public class ClientToAMTokenSecretManagerInRM extends BaseClientToAMTokenSecretManager { // Per application master-keys for managing client-tokens - private Map masterKeys = - new HashMap(); + private Map masterKeys = + new HashMap(); - public synchronized void registerApplication(ApplicationId applicationID) { - this.masterKeys.put(applicationID, generateSecret()); + public synchronized void registerApplication( + ApplicationAttemptId applicationAttemptID) { + this.masterKeys.put(applicationAttemptID, generateSecret()); } - public synchronized void unRegisterApplication(ApplicationId applicationID) { - this.masterKeys.remove(applicationID); + public synchronized void unRegisterApplication( + ApplicationAttemptId applicationAttemptID) { + this.masterKeys.remove(applicationAttemptID); } @Override - public synchronized SecretKey getMasterKey(ApplicationId applicationID) { - return this.masterKeys.get(applicationID); + public synchronized SecretKey getMasterKey( + ApplicationAttemptId applicationAttemptID) { + return this.masterKeys.get(applicationAttemptID); } } diff --git 
a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java index bee1cfd9866..9a74ef4fe06 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/FairSchedulerLeafQueueInfo.java @@ -1,3 +1,21 @@ +/** +* Licensed to the Apache Software Foundation (ASF) under one +* or more contributor license agreements. See the NOTICE file +* distributed with this work for additional information +* regarding copyright ownership. The ASF licenses this file +* to you under the Apache License, Version 2.0 (the +* "License"); you may not use this file except in compliance +* with the License. You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, software +* distributed under the License is distributed on an "AS IS" BASIS, +* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +* See the License for the specific language governing permissions and +* limitations under the License. +*/ + package org.apache.hadoop.yarn.server.resourcemanager.webapp.dao; import java.util.Collection; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java index cabbe6ace4a..1b778f2d4c9 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestApplicationACLs.java @@ -338,7 +338,7 @@ public class TestApplicationACLs { Assert.assertEquals("Enemy should not see app rpc port!", -1, appReport.getRpcPort()); Assert.assertEquals("Enemy should not see app client token!", - UNAVAILABLE, appReport.getClientToken()); + null, appReport.getClientToken()); Assert.assertEquals("Enemy should not see app diagnostics!", UNAVAILABLE, appReport.getDiagnostics()); Assert.assertEquals("Enemy should not see app tracking url!", diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java index 2157c0b1cfd..8479c2c87a3 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java @@ -18,10 +18,10 @@ package org.apache.hadoop.yarn.server.resourcemanager; -import static org.mockito.Mockito.mock; -import static org.mockito.Mockito.when; import static org.mockito.Matchers.anyBoolean; import static org.mockito.Matchers.anyString; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; import java.io.IOException; import java.net.InetSocketAddress; @@ -276,7 +276,7 @@ public class TestClientRMService { private RMAppImpl getRMApp(RMContext rmContext, YarnScheduler yarnScheduler, ApplicationId applicationId3, YarnConfiguration config, String queueName) { return new RMAppImpl(applicationId3, rmContext, config, null, null, - queueName, null, null, yarnScheduler, null, System + queueName, null, yarnScheduler, null , System .currentTimeMillis()); } } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java index 088eca9a2b4..eea5c6558fc 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/applicationsmanager/MockAsm.java @@ -29,6 +29,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationMaster; import org.apache.hadoop.yarn.api.records.ApplicationReport; import org.apache.hadoop.yarn.api.records.ApplicationStatus; import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.api.records.Container; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; @@ -77,7 +78,7 @@ public abstract class MockAsm extends MockApps { } @Override - public String getClientToken() { + public ClientToken getClientToken() { throw new UnsupportedOperationException("Not supported yet."); } @@ -127,7 +128,7 @@ public abstract class MockAsm extends MockApps { } @Override - public void setClientToken(String clientToken) { + public void setClientToken(ClientToken clientToken) { throw new UnsupportedOperationException("Not supported yet."); } diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java new file mode 100644 index 00000000000..440908feacb --- /dev/null +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/recovery/TestRMStateStore.java @@ -0,0 +1,284 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.yarn.server.resourcemanager.recovery; + +import static org.junit.Assert.*; +import static org.mockito.Mockito.*; + +import java.util.HashMap; +import java.util.Map; + +import org.junit.Test; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileStatus; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hdfs.HdfsConfiguration; +import org.apache.hadoop.hdfs.MiniDFSCluster; +import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; +import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; +import org.apache.hadoop.yarn.api.records.Container; +import org.apache.hadoop.yarn.api.records.ContainerId; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; +import org.apache.hadoop.yarn.api.records.impl.pb.ContainerPBImpl; +import org.apache.hadoop.yarn.conf.YarnConfiguration; +import org.apache.hadoop.yarn.event.Dispatcher; +import org.apache.hadoop.yarn.event.EventHandler; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationAttemptState; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.ApplicationState; +import org.apache.hadoop.yarn.server.resourcemanager.recovery.RMStateStore.RMState; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttempt; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.event.RMAppAttemptStoredEvent; +import org.apache.hadoop.yarn.util.ConverterUtils; + +public class TestRMStateStore { + + public static final Log LOG = LogFactory.getLog(TestRMStateStore.class); + + class TestDispatcher implements Dispatcher, EventHandler { + + ApplicationAttemptId attemptId; + Exception storedException; + + boolean notified = false; + + @SuppressWarnings("rawtypes") + @Override + public void register(Class eventType, EventHandler handler) { + } + + @Override + public void handle(RMAppAttemptStoredEvent event) { + assertEquals(attemptId, event.getApplicationAttemptId()); + assertEquals(storedException, event.getStoredException()); + notified = true; + synchronized (this) { + notifyAll(); + } + } + + @SuppressWarnings("rawtypes") + @Override + public EventHandler getEventHandler() { + return this; + } + + } + + interface RMStateStoreHelper { + RMStateStore getRMStateStore() throws Exception; + void addOrphanAttemptIfNeeded(RMStateStore testStore, + TestDispatcher dispatcher) throws Exception; + boolean isFinalStateValid() throws Exception; + } + + @Test + public void testFSRMStateStore() throws Exception { + HdfsConfiguration conf = 
new HdfsConfiguration(); + MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build(); + try { + TestFSRMStateStoreTester fsTester = new TestFSRMStateStoreTester(cluster); + testRMStateStore(fsTester); + } finally { + cluster.shutdown(); + } + } + + class TestFSRMStateStoreTester implements RMStateStoreHelper { + Path workingDirPathURI; + FileSystemRMStateStore store; + MiniDFSCluster cluster; + + class TestFileSystemRMStore extends FileSystemRMStateStore { + TestFileSystemRMStore(Configuration conf) throws Exception { + init(conf); + assertTrue(workingDirPathURI.equals(fsWorkingPath)); + } + } + + public TestFSRMStateStoreTester(MiniDFSCluster cluster) throws Exception { + Path workingDirPath = new Path("/Test"); + this.cluster = cluster; + FileSystem fs = cluster.getFileSystem(); + fs.mkdirs(workingDirPath); + Path clusterURI = new Path(cluster.getURI()); + workingDirPathURI = new Path(clusterURI, workingDirPath); + fs.close(); + } + + @Override + public RMStateStore getRMStateStore() throws Exception { + YarnConfiguration conf = new YarnConfiguration(); + conf.set(YarnConfiguration.FS_RM_STATE_STORE_URI, workingDirPathURI.toString()); + this.store = new TestFileSystemRMStore(conf); + return store; + } + + @Override + public void addOrphanAttemptIfNeeded(RMStateStore testStore, + TestDispatcher dispatcher) throws Exception { + ApplicationAttemptId attemptId = ConverterUtils.toApplicationAttemptId( + "appattempt_1352994193343_0003_000001"); + storeAttempt(testStore, attemptId, + "container_1352994193343_0003_01_000001", dispatcher); + } + + @Override + public boolean isFinalStateValid() throws Exception { + FileSystem fs = cluster.getFileSystem(); + FileStatus[] files = fs.listStatus(workingDirPathURI); + if(files.length == 1) { + // only store root directory should exist + return true; + } + return false; + } + } + + void waitNotify(TestDispatcher dispatcher) { + long startTime = System.currentTimeMillis(); + while(!dispatcher.notified) { + synchronized (dispatcher) { + try { + dispatcher.wait(1000); + } catch (InterruptedException e) { + e.printStackTrace(); + } + } + if(System.currentTimeMillis() - startTime > 1000*60) { + fail("Timed out attempt store notification"); + } + } + dispatcher.notified = false; + } + + void storeApp(RMStateStore store, ApplicationId appId, long time) + throws Exception { + ApplicationSubmissionContext context = new ApplicationSubmissionContextPBImpl(); + context.setApplicationId(appId); + + RMApp mockApp = mock(RMApp.class); + when(mockApp.getApplicationId()).thenReturn(appId); + when(mockApp.getSubmitTime()).thenReturn(time); + when(mockApp.getApplicationSubmissionContext()).thenReturn(context); + store.storeApplication(mockApp); + } + + ContainerId storeAttempt(RMStateStore store, ApplicationAttemptId attemptId, + String containerIdStr, TestDispatcher dispatcher) + throws Exception { + + Container container = new ContainerPBImpl(); + container.setId(ConverterUtils.toContainerId(containerIdStr)); + RMAppAttempt mockAttempt = mock(RMAppAttempt.class); + when(mockAttempt.getAppAttemptId()).thenReturn(attemptId); + when(mockAttempt.getMasterContainer()).thenReturn(container); + dispatcher.attemptId = attemptId; + dispatcher.storedException = null; + store.storeApplicationAttempt(mockAttempt); + waitNotify(dispatcher); + return container.getId(); + } + + void testRMStateStore(RMStateStoreHelper stateStoreHelper) throws Exception { + long submitTime = System.currentTimeMillis(); + RMStateStore store = stateStoreHelper.getRMStateStore(); + 
TestDispatcher dispatcher = new TestDispatcher(); + store.setDispatcher(dispatcher); + + ApplicationAttemptId attemptId1 = ConverterUtils + .toApplicationAttemptId("appattempt_1352994193343_0001_000001"); + ApplicationId appId1 = attemptId1.getApplicationId(); + storeApp(store, appId1, submitTime); + ContainerId containerId1 = storeAttempt(store, attemptId1, + "container_1352994193343_0001_01_000001", dispatcher); + String appAttemptIdStr2 = "appattempt_1352994193343_0001_000002"; + ApplicationAttemptId attemptId2 = + ConverterUtils.toApplicationAttemptId(appAttemptIdStr2); + ContainerId containerId2 = storeAttempt(store, attemptId2, + "container_1352994193343_0001_02_000001", dispatcher); + + ApplicationAttemptId attemptIdRemoved = ConverterUtils + .toApplicationAttemptId("appattempt_1352994193343_0002_000001"); + ApplicationId appIdRemoved = attemptIdRemoved.getApplicationId(); + storeApp(store, appIdRemoved, submitTime); + storeAttempt(store, attemptIdRemoved, + "container_1352994193343_0002_01_000001", dispatcher); + + RMApp mockRemovedApp = mock(RMApp.class); + HashMap attempts = + new HashMap(); + ApplicationSubmissionContext context = new ApplicationSubmissionContextPBImpl(); + context.setApplicationId(appIdRemoved); + when(mockRemovedApp.getSubmitTime()).thenReturn(submitTime); + when(mockRemovedApp.getApplicationSubmissionContext()).thenReturn(context); + when(mockRemovedApp.getAppAttempts()).thenReturn(attempts); + RMAppAttempt mockRemovedAttempt = mock(RMAppAttempt.class); + when(mockRemovedAttempt.getAppAttemptId()).thenReturn(attemptIdRemoved); + attempts.put(attemptIdRemoved, mockRemovedAttempt); + store.removeApplication(mockRemovedApp); + + // add orphan attempt file to simulate incomplete removal of app state + stateStoreHelper.addOrphanAttemptIfNeeded(store, dispatcher); + + // let things settle down + Thread.sleep(1000); + store.close(); + + // load state + store = stateStoreHelper.getRMStateStore(); + RMState state = store.loadState(); + Map rmAppState = state.getApplicationState(); + + // removed app or orphan attempt is not loaded + assertEquals(1, rmAppState.size()); + + ApplicationState appState = rmAppState.get(appId1); + // app is loaded + assertNotNull(appState); + // app is loaded correctly + assertEquals(submitTime, appState.getSubmitTime()); + // submission context is loaded correctly + assertEquals(appId1, + appState.getApplicationSubmissionContext().getApplicationId()); + ApplicationAttemptState attemptState = appState.getAttempt(attemptId1); + // attempt1 is loaded correctly + assertNotNull(attemptState); + assertEquals(attemptId1, attemptState.getAttemptId()); + // attempt1 container is loaded correctly + assertEquals(containerId1, attemptState.getMasterContainer().getId()); + attemptState = appState.getAttempt(attemptId2); + // attempt2 is loaded correctly + assertNotNull(attemptState); + assertEquals(attemptId2, attemptState.getAttemptId()); + // attempt2 container is loaded correctly + assertEquals(containerId2, attemptState.getMasterContainer().getId()); + + // assert store is in expected state after everything is cleaned + assertTrue(stateStoreHelper.isFinalStateValid()); + + store.close(); + } + +} diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java index 0f20cb39797..cccbc1c91e2 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/TestRMAppTransitions.java @@ -168,7 +168,6 @@ public class TestRMAppTransitions { Configuration conf = new YarnConfiguration(); // ensure max retries set to known value conf.setInt(YarnConfiguration.RM_AM_MAX_RETRIES, maxRetries); - String clientTokenStr = "bogusstring"; YarnScheduler scheduler = mock(YarnScheduler.class); ApplicationMasterService masterService = new ApplicationMasterService(rmContext, scheduler); @@ -177,11 +176,10 @@ public class TestRMAppTransitions { submissionContext = new ApplicationSubmissionContextPBImpl(); } - RMApp application = new RMAppImpl(applicationId, rmContext, - conf, name, user, - queue, submissionContext, clientTokenStr, - scheduler, - masterService, System.currentTimeMillis()); + RMApp application = + new RMAppImpl(applicationId, rmContext, conf, name, user, queue, + submissionContext, scheduler, masterService, + System.currentTimeMillis()); testAppStartState(applicationId, user, name, queue, application); return application; diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java index f944744f2d2..25a4b968fd1 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java @@ -209,9 +209,9 @@ public class TestRMAppAttemptTransitions { unmanagedAM = false; application = mock(RMApp.class); - applicationAttempt = - new RMAppAttemptImpl(applicationAttemptId, null, rmContext, scheduler, - masterService, submissionContext, new Configuration()); + applicationAttempt = + new RMAppAttemptImpl(applicationAttemptId, rmContext, scheduler, + masterService, submissionContext, new Configuration()); when(application.getCurrentAppAttempt()).thenReturn(applicationAttempt); when(application.getApplicationId()).thenReturn(applicationId); diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java index da53e39220c..a0e17588e03 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java +++ 
b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java @@ -41,20 +41,29 @@ import junit.framework.Assert; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.security.authorize.AccessControlList; import org.apache.hadoop.yarn.Clock; +import org.apache.hadoop.yarn.MockApps; import org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; +import org.apache.hadoop.yarn.api.records.ApplicationSubmissionContext; import org.apache.hadoop.yarn.api.records.ContainerId; import org.apache.hadoop.yarn.api.records.ContainerStatus; +import org.apache.hadoop.yarn.api.records.FinalApplicationStatus; import org.apache.hadoop.yarn.api.records.Priority; import org.apache.hadoop.yarn.api.records.QueueACL; import org.apache.hadoop.yarn.api.records.ResourceRequest; +import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationSubmissionContextPBImpl; import org.apache.hadoop.yarn.conf.YarnConfiguration; import org.apache.hadoop.yarn.event.AsyncDispatcher; import org.apache.hadoop.yarn.factories.RecordFactory; import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider; +import org.apache.hadoop.yarn.server.resourcemanager.ApplicationMasterService; import org.apache.hadoop.yarn.server.resourcemanager.MockNodes; import org.apache.hadoop.yarn.server.resourcemanager.ResourceManager; import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEvent; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppEventType; +import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppImpl; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.attempt.RMAppAttemptState; import org.apache.hadoop.yarn.server.resourcemanager.rmcontainer.RMContainer; import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode; @@ -1275,4 +1284,136 @@ public class TestFairScheduler { FSSchedulerApp app2 = scheduler.applications.get(attId2); assertNull("The application was allowed", app2); } + + @Test + public void testMultipleNodesSingleRackRequest() throws Exception { + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node2 = MockNodes.newNodeInfo(1, Resources.createResource(1024)); + RMNode node3 = MockNodes.newNodeInfo(2, Resources.createResource(1024)); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + NodeAddedSchedulerEvent nodeEvent2 = new NodeAddedSchedulerEvent(node2); + scheduler.handle(nodeEvent2); + + ApplicationAttemptId appId = createAppAttemptId(this.APP_ID++, this.ATTEMPT_ID++); + scheduler.addApplication(appId, "queue1", "user1"); + + // 1 request with 2 nodes on the same rack. 
another request with 1 node on + // a different rack + List asks = new ArrayList(); + asks.add(createResourceRequest(1024, node1.getHostName(), 1, 1)); + asks.add(createResourceRequest(1024, node2.getHostName(), 1, 1)); + asks.add(createResourceRequest(1024, node3.getHostName(), 1, 1)); + asks.add(createResourceRequest(1024, node1.getRackName(), 1, 1)); + asks.add(createResourceRequest(1024, node3.getRackName(), 1, 1)); + asks.add(createResourceRequest(1024, RMNode.ANY, 1, 2)); + + scheduler.allocate(appId, asks, new ArrayList()); + + // node 1 checks in + scheduler.update(); + NodeUpdateSchedulerEvent updateEvent1 = new NodeUpdateSchedulerEvent(node1, + new ArrayList(), new ArrayList()); + scheduler.handle(updateEvent1); + // should assign node local + assertEquals(1, scheduler.applications.get(appId).getLiveContainers().size()); + + // node 2 checks in + scheduler.update(); + NodeUpdateSchedulerEvent updateEvent2 = new NodeUpdateSchedulerEvent(node2, + new ArrayList(), new ArrayList()); + scheduler.handle(updateEvent2); + // should assign rack local + assertEquals(2, scheduler.applications.get(appId).getLiveContainers().size()); + } + + @Test + public void testFifoWithinQueue() throws Exception { + RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(3072)); + NodeAddedSchedulerEvent nodeEvent1 = new NodeAddedSchedulerEvent(node1); + scheduler.handle(nodeEvent1); + + // Even if submitted at exact same time, apps will be deterministically + // ordered by name. + ApplicationAttemptId attId1 = createSchedulingRequest(1024, "queue1", + "user1", 2); + ApplicationAttemptId attId2 = createSchedulingRequest(1024, "queue1", + "user1", 2); + FSSchedulerApp app1 = scheduler.applications.get(attId1); + FSSchedulerApp app2 = scheduler.applications.get(attId2); + + FSLeafQueue queue1 = scheduler.getQueueManager().getLeafQueue("queue1"); + queue1.setSchedulingMode(SchedulingMode.FIFO); + + scheduler.update(); + + // First two containers should go to app 1, third should go to app 2. + // Because tests set assignmultiple to false, each heartbeat assigns a single + // container. 
+
+ NodeUpdateSchedulerEvent updateEvent = new NodeUpdateSchedulerEvent(node1,
+ new ArrayList<ContainerStatus>(), new ArrayList<ContainerStatus>());
+
+ scheduler.handle(updateEvent);
+ assertEquals(1, app1.getLiveContainers().size());
+ assertEquals(0, app2.getLiveContainers().size());
+
+ scheduler.handle(updateEvent);
+ assertEquals(2, app1.getLiveContainers().size());
+ assertEquals(0, app2.getLiveContainers().size());
+
+ scheduler.handle(updateEvent);
+ assertEquals(2, app1.getLiveContainers().size());
+ assertEquals(1, app2.getLiveContainers().size());
+ }
+
+
+ @SuppressWarnings("unchecked")
+ @Test
+ public void testNotAllowSubmitApplication() throws Exception {
+ // Set acl's
+ Configuration conf = createConfiguration();
+ conf.set(FairSchedulerConfiguration.ALLOCATION_FILE, ALLOC_FILE);
+ scheduler.reinitialize(conf, resourceManager.getRMContext());
+ PrintWriter out = new PrintWriter(new FileWriter(ALLOC_FILE));
+ out.println("<?xml version=\"1.0\"?>");
+ out.println("<allocations>");
+ out.println("<queue name=\"queue1\">");
+ out.println("<aclSubmitApps>userallow</aclSubmitApps>");
+ out.println("</queue>");
+ out.println("</allocations>");
+ out.close();
+ QueueManager queueManager = scheduler.getQueueManager();
+ queueManager.initialize();
+
+ int appId = this.APP_ID++;
+ String user = "usernotallow";
+ String queue = "queue1";
+ ApplicationId applicationId = MockApps.newAppID(appId);
+ String name = MockApps.newAppName();
+ ApplicationMasterService masterService =
+ new ApplicationMasterService(resourceManager.getRMContext(), scheduler);
+ ApplicationSubmissionContext submissionContext = new ApplicationSubmissionContextPBImpl();
+ RMApp application =
+ new RMAppImpl(applicationId, resourceManager.getRMContext(), conf, name, user,
+ queue, submissionContext, scheduler, masterService,
+ System.currentTimeMillis());
+ resourceManager.getRMContext().getRMApps().putIfAbsent(applicationId, application);
+ application.handle(new RMAppEvent(applicationId, RMAppEventType.START));
+
+ ApplicationAttemptId attId = recordFactory.newRecordInstance(ApplicationAttemptId.class);
+ attId.setAttemptId(this.ATTEMPT_ID++);
+ attId.setApplicationId(applicationId);
+ scheduler.addApplication(attId, queue, user);
+
+ final int MAX_TRIES=20;
+ int numTries = 0;
+ while (application.getFinishTime() == 0 && numTries < MAX_TRIES) {
+ try {
+ Thread.sleep(100);
+ } catch (InterruptedException ex) {ex.printStackTrace();}
+ numTries++;
+ }
+ assertEquals(FinalApplicationStatus.FAILED, application.getFinalApplicationStatus());
+ }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
index 032e6c28f61..3f02fd2ee01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
@@ -52,8 +52,10 @@ import org.apache.hadoop.yarn.api.protocolrecords.StartContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StartContainerResponse;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.StopContainerResponse;
+import
org.apache.hadoop.yarn.api.records.ApplicationAttemptId; import org.apache.hadoop.yarn.api.records.ApplicationId; import org.apache.hadoop.yarn.api.records.ApplicationReport; +import org.apache.hadoop.yarn.api.records.ClientToken; import org.apache.hadoop.yarn.event.Dispatcher; import org.apache.hadoop.yarn.event.DrainDispatcher; import org.apache.hadoop.yarn.exceptions.YarnRemoteException; @@ -67,6 +69,7 @@ import org.apache.hadoop.yarn.server.resourcemanager.MockRMWithCustomAMLauncher; import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMApp; import org.apache.hadoop.yarn.service.AbstractService; import org.apache.hadoop.yarn.util.BuilderUtils; +import org.apache.hadoop.yarn.util.ProtoUtils; import org.apache.hadoop.yarn.util.Records; import org.junit.Test; @@ -106,14 +109,14 @@ public class TestClientTokens { private static class CustomAM extends AbstractService implements CustomProtocol { - private final ApplicationId appId; + private final ApplicationAttemptId appAttemptId; private final String secretKey; private InetSocketAddress address; private boolean pinged = false; - public CustomAM(ApplicationId appId, String secretKeyStr) { + public CustomAM(ApplicationAttemptId appId, String secretKeyStr) { super("CustomAM"); - this.appId = appId; + this.appAttemptId = appId; this.secretKey = secretKeyStr; } @@ -128,7 +131,7 @@ public class TestClientTokens { ClientToAMTokenSecretManager secretManager = null; byte[] bytes = Base64.decodeBase64(this.secretKey); - secretManager = new ClientToAMTokenSecretManager(this.appId, bytes); + secretManager = new ClientToAMTokenSecretManager(this.appAttemptId, bytes); Server server; try { server = @@ -216,7 +219,7 @@ public class TestClientTokens { GetApplicationReportResponse reportResponse = rm.getClientRMService().getApplicationReport(request); ApplicationReport appReport = reportResponse.getApplicationReport(); - String clientTokenEncoded = appReport.getClientToken(); + ClientToken clientToken = appReport.getClientToken(); // Wait till AM is 'launched' int waitTime = 0; @@ -226,9 +229,11 @@ public class TestClientTokens { Assert.assertNotNull(containerManager.clientTokensSecret); // Start the AM with the correct shared-secret. 
+ ApplicationAttemptId appAttemptId = + app.getAppAttempts().keySet().iterator().next(); + Assert.assertNotNull(appAttemptId); final CustomAM am = - new CustomAM(app.getApplicationId(), - containerManager.clientTokensSecret); + new CustomAM(appAttemptId, containerManager.clientTokensSecret); am.init(conf); am.start(); @@ -249,21 +254,19 @@ public class TestClientTokens { // Verify denial for a malicious user UserGroupInformation ugi = UserGroupInformation.createRemoteUser("me"); - Token clientToken = - new Token(); - clientToken.decodeFromUrlString(clientTokenEncoded); - // RPC layer client expects ip:port as service for tokens - SecurityUtil.setTokenService(clientToken, am.address); + Token token = + ProtoUtils.convertFromProtoFormat(clientToken, am.address); // Malicious user, messes with appId ClientTokenIdentifier maliciousID = - new ClientTokenIdentifier(BuilderUtils.newApplicationId(app - .getApplicationId().getClusterTimestamp(), 42)); + new ClientTokenIdentifier(BuilderUtils.newApplicationAttemptId( + BuilderUtils.newApplicationId(app.getApplicationId() + .getClusterTimestamp(), 42), 43)); Token maliciousToken = new Token(maliciousID.getBytes(), - clientToken.getPassword(), clientToken.getKind(), - clientToken.getService()); + token.getPassword(), token.getKind(), + token.getService()); ugi.addToken(maliciousToken); try { @@ -297,7 +300,7 @@ public class TestClientTokens { // Now for an authenticated user ugi = UserGroupInformation.createRemoteUser("me"); - ugi.addToken(clientToken); + ugi.addToken(token); ugi.doAs(new PrivilegedExceptionAction() { @Override diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer index 24c0713a5d5..f48ffa80b4f 100644 --- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer +++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer @@ -1 +1,14 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
 org.apache.hadoop.yarn.server.resourcemanager.security.TestDelegationTokenRenewer$Renewer
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java
index 6a2bbb7a359..cec30d1c55b 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxy.java
@@ -114,4 +114,13 @@ public class WebAppProxy extends AbstractService {
 }
 super.stop();
 }
+
+ public void join() {
+ if(proxyServer != null) {
+ try {
+ proxyServer.join();
+ } catch (InterruptedException e) {
+ }
+ }
+ }
 }
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
index 52d2dc6ade0..c824cfb6c06 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServer.java
@@ -73,6 +73,14 @@ public class WebAppProxyServer extends CompositeService {
 YarnConfiguration.PROXY_PRINCIPAL);
 }
+ /**
+ * Wait for service to finish.
+ * (Normally, it runs forever.)
+ */
+ private void join() {
+ proxy.join();
+ }
+
 public static void main(String[] args) {
 Thread.setDefaultUncaughtExceptionHandler(new YarnUncaughtExceptionHandler());
 StringUtils.startupShutdownMessage(WebAppProxyServer.class, args, LOG);
@@ -84,6 +92,7 @@ public class WebAppProxyServer extends CompositeService {
 YarnConfiguration conf = new YarnConfiguration();
 proxy.init(conf);
 proxy.start();
+ proxy.join();
 } catch (Throwable t) {
 LOG.fatal("Error starting Proxy server", t);
 System.exit(-1);
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
index 988c42dfe11..2d12699bf73 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-site/src/site/apt/FairScheduler.apt.vm
@@ -132,20 +132,45 @@ Hadoop MapReduce Next Generation - Fair Scheduler
 * Whether to use the username associated with the allocation as the default
 queue name, in the event that a queue name is not specified. If this is set
 to "false" or unset, all jobs have a shared default queue, called "default".
+ Defaults to true.

 * <<<yarn.scheduler.fair.preemption>>>

 * Whether to use preemption. Note that preemption is experimental in the current
- version.
+ version. Defaults to false.

 * <<<yarn.scheduler.fair.sizebasedweight>>>

 * Whether to assign shares to individual apps based on their size, rather than
- providing an equal share to all apps regardless of size.
+ providing an equal share to all apps regardless of size. Defaults to false.

 * <<<yarn.scheduler.fair.assignmultiple>>>

- * Whether to allow multiple container assignments in one heartbeat.
+ * Whether to allow multiple container assignments in one heartbeat. Defaults + to false. + + * <<>> + + * If assignmultiple is true, the maximum amount of containers that can be + assigned in one heartbeat. Defaults to -1, which sets no limit. + + * <<>> + + * For applications that request containers on particular nodes, the number of + scheduling opportunities since the last container assignment to wait before + accepting a placement on another node. Expressed as a float between 0 and 1, + which, as a fraction of the cluster size, is the number of scheduling + opportunities to pass up. The default value of -1.0 means don't pass up any + scheduling opportunities. + + * <<>> + + * For applications that request containers on particular racks, the number of + scheduling opportunities since the last container assignment to wait before + accepting a placement on another rack. Expressed as a float between 0 and 1, + which, as a fraction of the cluster size, is the number of scheduling + opportunities to pass up. The default value of -1.0 means don't pass up any + scheduling opportunities. Allocation file format @@ -166,6 +191,14 @@ Allocation file format * schedulingMode: either "fifo" or "fair" depending on the in-queue scheduling policy desired + * aclSubmitApps: a list of users that can submit apps to the queue. A (default) + value of "*" means that any users can submit apps. A queue inherits the ACL of + its parent, so if a queue2 descends from queue1, and user1 is in queue1's ACL, + and user2 is in queue2's ACL, then both users may submit to queue2. + + * minSharePreemptionTimeout: number of seconds the queue is under its minimum share + before it will try to preempt containers to take resources from other queues. + * <>, which represent settings governing the behavior of individual users. They can contain a single property: maxRunningApps, a limit on the number of running apps for a particular user. @@ -173,6 +206,10 @@ Allocation file format * <>, which sets the default running app limit for any users whose limit is not otherwise specified. + * <>, number of seconds a queue is under + its fair share before it will try to preempt containers to take resources from + other queues. 
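To make the queue-level allocation-file elements described above concrete, here is a minimal, illustrative sketch (not part of this patch) that writes an allocation file exercising aclSubmitApps, minSharePreemptionTimeout, userMaxAppsDefault and fairSharePreemptionTimeout, using the same PrintWriter pattern as the new TestFairScheduler test; the file path, queue name, user list and timeout values are assumptions chosen for the example.

  import java.io.FileWriter;
  import java.io.PrintWriter;

  // Illustrative sketch only: element names follow the documentation above.
  public class AllocationFileSketch {
    public static void main(String[] args) throws Exception {
      // Assumed path; the scheduler is normally pointed at the file through
      // the yarn.scheduler.fair.allocation.file property.
      PrintWriter out = new PrintWriter(new FileWriter("/tmp/fair-scheduler.xml"));
      out.println("<?xml version=\"1.0\"?>");
      out.println("<allocations>");
      out.println("  <queue name=\"queue1\">");
      out.println("    <aclSubmitApps>alice,bob</aclSubmitApps>");
      out.println("    <minSharePreemptionTimeout>60</minSharePreemptionTimeout>");
      out.println("  </queue>");
      out.println("  <userMaxAppsDefault>5</userMaxAppsDefault>");
      out.println("  <fairSharePreemptionTimeout>600</fairSharePreemptionTimeout>");
      out.println("</allocations>");
      out.close();
    }
  }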
+ An example allocation file is given here: --- diff --git a/hadoop-yarn-project/hadoop-yarn/pom.xml b/hadoop-yarn-project/hadoop-yarn/pom.xml index 559be4f4df4..5d261a2804d 100644 --- a/hadoop-yarn-project/hadoop-yarn/pom.xml +++ b/hadoop-yarn-project/hadoop-yarn/pom.xml @@ -149,9 +149,10 @@ org.apache.rat apache-rat-plugin - - pom.xml - + + conf/slaves + conf/container-executor.cfg + diff --git a/hadoop-yarn-project/pom.xml b/hadoop-yarn-project/pom.xml index c1e38967c5d..a0eed4838c2 100644 --- a/hadoop-yarn-project/pom.xml +++ b/hadoop-yarn-project/pom.xml @@ -213,9 +213,9 @@ org.apache.rat apache-rat-plugin - - pom.xml - + + CHANGES.txt + diff --git a/pom.xml b/pom.xml index 0d97191b955..60ebc378221 100644 --- a/pom.xml +++ b/pom.xml @@ -36,12 +36,12 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - apache.staging.https - Apache Release Distribution Repository - https://repository.apache.org/service/local/staging/deploy/maven2 + ${distMgmtStagingId} + ${distMgmtStagingName} + ${distMgmtStagingUrl} - apache.snapshots.https + ${distMgmtSnapshotsId} ${distMgmtSnapshotsName} ${distMgmtSnapshotsUrl} @@ -53,7 +53,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs - apache.snapshots.https + ${distMgmtSnapshotsId} ${distMgmtSnapshotsName} ${distMgmtSnapshotsUrl} @@ -79,14 +79,19 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs + apache.snapshots.https Apache Development Snapshot Repository https://repository.apache.org/content/repositories/snapshots + apache.staging.https + Apache Release Distribution Repository + https://repository.apache.org/service/local/staging/deploy/maven2 hadoop-project hadoop-project-dist hadoop-assemblies + hadoop-maven-plugins hadoop-common-project hadoop-hdfs-project hadoop-yarn-project @@ -294,12 +299,14 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs org.apache.rat apache-rat-plugin - - - dev-support/* - pom.xml - - + + + .gitattributes + .gitignore + .git/** + .idea/** + + maven-site-plugin
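As a closing illustration of the Fair Scheduler settings documented earlier in this patch, the sketch below sets them on a YarnConfiguration. The property keys are assumptions (the usual yarn.scheduler.fair.* names from FairSchedulerConfiguration) and the values are arbitrary; on a real cluster these would normally live in yarn-site.xml rather than be set in code.

  import org.apache.hadoop.yarn.conf.YarnConfiguration;

  // Illustrative sketch only: property keys are assumed yarn.scheduler.fair.*
  // names, not quoted from this patch.
  public class FairSchedulerSettingsSketch {
    public static void main(String[] args) {
      YarnConfiguration conf = new YarnConfiguration();
      conf.setBoolean("yarn.scheduler.fair.user-as-default-queue", true);
      conf.setBoolean("yarn.scheduler.fair.preemption", false);
      conf.setBoolean("yarn.scheduler.fair.sizebasedweight", false);
      // Allow several container assignments per heartbeat, but cap the count.
      conf.setBoolean("yarn.scheduler.fair.assignmultiple", true);
      conf.setInt("yarn.scheduler.fair.max.assign", 2);
      // Trade a fraction of scheduling opportunities for better locality.
      conf.setFloat("yarn.scheduler.fair.locality.threshold.node", 0.5f);
      conf.setFloat("yarn.scheduler.fair.locality.threshold.rack", 0.8f);
      System.out.println("max.assign = " + conf.get("yarn.scheduler.fair.max.assign"));
    }
  }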