diff --git a/.gitignore b/.gitignore
index e5a919d7e7a..f223254b16a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -7,3 +7,4 @@
.project
.settings
target
+hadoop-hdfs-project/hadoop-hdfs/downloads
diff --git a/BUILDING.txt b/BUILDING.txt
index d0b9aa28941..662bd2577a3 100644
--- a/BUILDING.txt
+++ b/BUILDING.txt
@@ -1,5 +1,4 @@
-
-Build instructions for Hadoop Common/HDFS using Maven
+Build instructions for Hadoop
----------------------------------------------------------------------------------
Requirements:
@@ -9,19 +8,24 @@ Requirements:
* Maven 3.0
* Forrest 0.8 (if generating docs)
* Findbugs 1.3.9 (if running findbugs)
+* ProtocolBuffer 2.4.1+ (for MapReduce)
* Autotools (if compiling native code)
* Internet connection for first build (to fetch all Maven and Hadoop dependencies)
----------------------------------------------------------------------------------
-Maven modules:
+Maven main modules:
- hadoop (Main Hadoop project)
- - hadoop-project (Parent POM for all Hadoop Maven modules. )
- (All plugins & dependencies versions are defined here.)
- - hadoop-project-dist (Parent POM for modules that generate distributions.)
- - hadoop-annotations (Generates the Hadoop doclet used to generated the Javadocs)
- - hadoop-common (Hadoop Common)
- - hadoop-hdfs (Hadoop HDFS)
+ hadoop (Main Hadoop project)
+ - hadoop-project (Parent POM for all Hadoop Maven modules. )
+ (All plugins & dependencies versions are defined here.)
+ - hadoop-project-dist (Parent POM for modules that generate distributions.)
+ - hadoop-annotations (Generates the Hadoop doclet used to generate the Javadocs)
+ - hadoop-assemblies (Maven assemblies used by the different modules)
+ - hadoop-common-project (Hadoop Common)
+ - hadoop-hdfs-project (Hadoop HDFS)
+ - hadoop-mapreduce-project (Hadoop MapReduce)
+ - hadoop-tools (Hadoop tools like Streaming, Distcp, etc.)
+ - hadoop-dist (Hadoop distribution assembler)
----------------------------------------------------------------------------------
Where to run Maven from?
@@ -45,6 +49,7 @@ Maven build goals:
* Run Rat : mvn apache-rat:check
* Build javadocs : mvn javadoc:javadoc
* Build distribution : mvn package [-Pdist][-Pdocs][-Psrc][-Pnative][-Dtar]
+ * Change Hadoop version : mvn versions:set -DnewVersion=NEWVERSION
Build options:
@@ -52,15 +57,34 @@ Maven build goals:
* Use -Dsnappy.prefix=(/usr/local) & -Dbundle.snappy=(false) to compile
Snappy JNI bindings and to bundle Snappy SO files
* Use -Pdocs to generate & bundle the documentation in the distribution (using -Pdist)
- * Use -Psrc to bundle the source in the distribution (using -Pdist)
+ * Use -Psrc to create a project source TAR.GZ
* Use -Dtar to create a TAR with the distribution (using -Pdist)
Tests options:
* Use -DskipTests to skip tests when running the following Maven goals:
'package', 'install', 'deploy' or 'verify'
- * -Dtest=<TESTCLASSNAME>,....
+ * -Dtest=<TESTCLASSNAME>,<TESTCLASSNAME#METHODNAME>,....
* -Dtest.exclude=<TESTCLASSNAME>
* -Dtest.exclude.pattern=**/<TESTCLASSNAME1>.java,**/<TESTCLASSNAME2>.java
----------------------------------------------------------------------------------
+Building distributions:
+
+Create binary distribution without native code and without documentation:
+
+ $ mvn package -Pdist -DskipTests -Dtar
+
+Create binary distribution with native code and with documentation:
+
+ $ mvn package -Pdist,native,docs -DskipTests -Dtar
+
+Create source distribution:
+
+ $ mvn package -Psrc -DskipTests
+
+Create source and binary distributions with native code and documentation:
+
+ $ mvn package -Pdist,native,docs,src -DskipTests -Dtar
+
+----------------------------------------------------------------------------------
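For illustration only, the test options listed above might be combined on the
command line as follows; TestFoo and TestBar are placeholder test class names,
not taken from this patch:

  $ mvn test -Dtest=TestFoo,TestBar#testMethod
  $ mvn test -Dtest.exclude.pattern=**/TestFoo.java,**/TestBar.java
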
diff --git a/common/src/test/bin/smart-apply-patch.sh b/common/src/test/bin/smart-apply-patch.sh
deleted file mode 100755
index 3334c2bd882..00000000000
--- a/common/src/test/bin/smart-apply-patch.sh
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env bash
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-set -e
-
-PATCH_FILE=$1
-if [ -z "$PATCH_FILE" ]; then
- echo usage: $0 patch-file
- exit 1
-fi
-
-PATCH=${PATCH:-patch} # allow overriding patch binary
-
-# Cleanup handler for temporary files
-TOCLEAN=""
-cleanup() {
- rm $TOCLEAN
- exit $1
-}
-trap "cleanup 1" HUP INT QUIT TERM
-
-# Allow passing "-" for stdin patches
-if [ "$PATCH_FILE" == "-" ]; then
- PATCH_FILE=/tmp/tmp.in.$$
- cat /dev/fd/0 > $PATCH_FILE
- TOCLEAN="$TOCLEAN $PATCH_FILE"
-fi
-
-# Come up with a list of changed files into $TMP
-TMP=/tmp/tmp.paths.$$
-TOCLEAN="$TOCLEAN $TMP"
-grep '^+++\|^---' $PATCH_FILE | cut -c '5-' | grep -v /dev/null | sort | uniq > $TMP
-
-# Assume p0 to start
-PLEVEL=0
-
-# if all of the lines start with a/ or b/, then this is a git patch that
-# was generated without --no-prefix
-if ! grep -qv '^a/\|^b/' $TMP ; then
- echo Looks like this is a git patch. Stripping a/ and b/ prefixes
- echo and incrementing PLEVEL
- PLEVEL=$[$PLEVEL + 1]
- sed -i -e 's,^[ab]/,,' $TMP
-fi
-
-# if all of the lines start with common/, hdfs/, or mapreduce/, this is
-# relative to the hadoop root instead of the subproject root, so we need
-# to chop off another layer
-PREFIX_DIRS=$(cut -d '/' -f 1 $TMP | sort | uniq)
-if [[ "$PREFIX_DIRS" =~ ^(hdfs|common|mapreduce)$ ]]; then
-
- echo Looks like this is relative to project root. Increasing PLEVEL
- PLEVEL=$[$PLEVEL + 1]
-elif ! echo "$PREFIX_DIRS" | grep -vxq 'common\|hdfs\|mapreduce' ; then
- echo Looks like this is a cross-subproject patch. Not supported!
- exit 1
-fi
-
-echo Going to apply patch with: $PATCH -p$PLEVEL
-$PATCH -p$PLEVEL -E < $PATCH_FILE
-
-cleanup 0
diff --git a/common/src/test/bin/test-patch.sh b/common/src/test/bin/test-patch.sh
deleted file mode 100755
index 7e87c86641f..00000000000
--- a/common/src/test/bin/test-patch.sh
+++ /dev/null
@@ -1,701 +0,0 @@
-#!/usr/bin/env bash
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-#set -x
-ulimit -n 1024
-
-### Setup some variables.
-### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
-### Read variables from properties file
-bindir=$(dirname $0)
-. $bindir/../test-patch.properties
-
-###############################################################################
-parseArgs() {
- case "$1" in
- HUDSON)
- ### Set HUDSON to true to indicate that this script is being run by Hudson
- HUDSON=true
- if [[ $# != 16 ]] ; then
- echo "ERROR: usage $0 HUDSON "
- cleanupAndExit 0
- fi
- PATCH_DIR=$2
- SUPPORT_DIR=$3
- PS=$4
- WGET=$5
- JIRACLI=$6
- SVN=$7
- GREP=$8
- PATCH=$9
- FINDBUGS_HOME=${10}
- FORREST_HOME=${11}
- ECLIPSE_HOME=${12}
- BASEDIR=${13}
- JIRA_PASSWD=${14}
- CURL=${15}
- defect=${16}
-
- ### Retrieve the defect number
- if [ -z "$defect" ] ; then
- echo "Could not determine the patch to test. Exiting."
- cleanupAndExit 0
- fi
-
- if [ ! -e "$PATCH_DIR" ] ; then
- mkdir -p $PATCH_DIR
- fi
-
- ECLIPSE_PROPERTY="-Declipse.home=$ECLIPSE_HOME"
- ;;
- DEVELOPER)
- ### Set HUDSON to false to indicate that this script is being run by a developer
- HUDSON=false
- if [[ $# != 9 ]] ; then
- echo "ERROR: usage $0 DEVELOPER "
- cleanupAndExit 0
- fi
- ### PATCH_FILE contains the location of the patchfile
- PATCH_FILE=$2
- if [[ ! -e "$PATCH_FILE" ]] ; then
- echo "Unable to locate the patch file $PATCH_FILE"
- cleanupAndExit 0
- fi
- PATCH_DIR=$3
- ### Check if $PATCH_DIR exists. If it does not exist, create a new directory
- if [[ ! -e "$PATCH_DIR" ]] ; then
- mkdir "$PATCH_DIR"
- if [[ $? == 0 ]] ; then
- echo "$PATCH_DIR has been created"
- else
- echo "Unable to create $PATCH_DIR"
- cleanupAndExit 0
- fi
- fi
- SVN=$4
- GREP=$5
- PATCH=$6
- FINDBUGS_HOME=$7
- FORREST_HOME=$8
- BASEDIR=$9
- ### Obtain the patch filename to append it to the version number
- defect=`basename $PATCH_FILE`
- ;;
- *)
- echo "ERROR: usage $0 HUDSON [args] | DEVELOPER [args]"
- cleanupAndExit 0
- ;;
- esac
-}
-
-###############################################################################
-checkout () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Testing patch for ${defect}."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- ### When run by a developer, if the workspace contains modifications, do not continue
- status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'`
- if [[ $HUDSON == "false" ]] ; then
- if [[ "$status" != "" ]] ; then
- echo "ERROR: can't run in a workspace that contains the following modifications"
- echo "$status"
- cleanupAndExit 1
- fi
- else
- cd $BASEDIR
- $SVN revert -R .
- rm -rf `$SVN status --no-ignore`
- $SVN update
- fi
- return $?
-}
-
-###############################################################################
-setup () {
- ### Download latest patch file (ignoring .htm and .html) when run from patch process
- if [[ $HUDSON == "true" ]] ; then
- $WGET -q -O $PATCH_DIR/jira http://issues.apache.org/jira/browse/$defect
- if [[ `$GREP -c 'Patch Available' $PATCH_DIR/jira` == 0 ]] ; then
- echo "$defect is not \"Patch Available\". Exiting."
- cleanupAndExit 0
- fi
- relativePatchURL=`$GREP -o '"/jira/secure/attachment/[0-9]*/[^"]*' $PATCH_DIR/jira | $GREP -v -e 'htm[l]*$' | sort | tail -1 | $GREP -o '/jira/secure/attachment/[0-9]*/[^"]*'`
- patchURL="http://issues.apache.org${relativePatchURL}"
- patchNum=`echo $patchURL | $GREP -o '[0-9]*/' | $GREP -o '[0-9]*'`
- echo "$defect patch is being downloaded at `date` from"
- echo "$patchURL"
- $WGET -q -O $PATCH_DIR/patch $patchURL
- VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum}
- JIRA_COMMENT="Here are the results of testing the latest attachment
- $patchURL
- against trunk revision ${SVN_REVISION}."
-
- ### Copy in any supporting files needed by this process
- cp -r $SUPPORT_DIR/lib/* ./lib
- #PENDING: cp -f $SUPPORT_DIR/etc/checkstyle* ./src/test
- ### Copy the patch file to $PATCH_DIR
- else
- VERSION=PATCH-${defect}
- cp $PATCH_FILE $PATCH_DIR/patch
- if [[ $? == 0 ]] ; then
- echo "Patch file $PATCH_FILE copied to $PATCH_DIR"
- else
- echo "Could not copy $PATCH_FILE to $PATCH_DIR"
- cleanupAndExit 0
- fi
- fi
- ### exit if warnings are NOT defined in the properties file
- if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]]; then
- echo "Please define the following properties in test-patch.properties file"
- echo "OK_FINDBUGS_WARNINGS"
- echo "OK_RELEASEAUDIT_WARNINGS"
- echo "OK_JAVADOC_WARNINGS"
- cleanupAndExit 1
- fi
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Pre-build trunk to verify trunk stability and javac warnings"
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- echo "$ANT_HOME/bin/ant -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -D${PROJECT_NAME}PatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
- $ANT_HOME/bin/ant -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -D${PROJECT_NAME}PatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
- if [[ $? != 0 ]] ; then
- echo "Trunk compilation is broken?"
- cleanupAndExit 1
- fi
-}
-
-###############################################################################
-### Check for @author tags in the patch
-checkAuthor () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Checking there are no @author tags in the patch."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- authorTags=`$GREP -c -i '@author' $PATCH_DIR/patch`
- echo "There appear to be $authorTags @author tags in the patch."
- if [[ $authorTags != 0 ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 @author. The patch appears to contain $authorTags @author tags which the Hadoop community has agreed to not allow in code contributions."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 @author. The patch does not contain any @author tags."
- return 0
-}
-
-###############################################################################
-### Check for tests in the patch
-checkTests () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Checking there are new or changed tests in the patch."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- testReferences=`$GREP -c -i '/test' $PATCH_DIR/patch`
- echo "There appear to be $testReferences test files referenced in the patch."
- if [[ $testReferences == 0 ]] ; then
- if [[ $HUDSON == "true" ]] ; then
- patchIsDoc=`$GREP -c -i 'title="documentation' $PATCH_DIR/jira`
- if [[ $patchIsDoc != 0 ]] ; then
- echo "The patch appears to be a documentation patch that doesn't require tests."
- JIRA_COMMENT="$JIRA_COMMENT
-
- +0 tests included. The patch appears to be a documentation patch that doesn't require tests."
- return 0
- fi
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 tests included. The patch doesn't appear to include any new or modified tests.
- Please justify why no new tests are needed for this patch.
- Also please list what manual steps were performed to verify this patch."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 tests included. The patch appears to include $testReferences new or modified tests."
- return 0
-}
-
-cleanUpXml () {
- cd $BASEDIR/conf
- for file in `ls *.xml.template`
- do
- rm -f `basename $file .template`
- done
- cd $BASEDIR
-}
-
-###############################################################################
-### Attempt to apply the patch
-applyPatch () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Applying patch."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- export PATCH
- $bindir/smart-apply-patch.sh $PATCH_DIR/patch
- if [[ $? != 0 ]] ; then
- echo "PATCH APPLICATION FAILED"
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 patch. The patch command could not apply the patch."
- return 1
- fi
- return 0
-}
-
-###############################################################################
-### Check there are no javadoc warnings
-checkJavadocWarnings () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Determining number of patched javadoc warnings."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt
- javadocWarnings=`$GREP -o '\[javadoc\] [0-9]* warning' $PATCH_DIR/patchJavadocWarnings.txt | awk '{total += $2} END {print total}'`
- echo ""
- echo ""
- echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
-
- ### if current warnings greater than OK_JAVADOC_WARNINGS
- if [[ $javadocWarnings > $OK_JAVADOC_WARNINGS ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 javadoc. The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 javadoc. The javadoc tool did not generate any warning messages."
- return 0
-}
-
-###############################################################################
-### Check there are no changes in the number of Javac warnings
-checkJavacWarnings () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Determining number of patched javac warnings."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1
- if [[ $? != 0 ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 javac. The patch appears to cause tar ant target to fail."
- return 1
- fi
- ### Compare trunk and patch javac warning numbers
- if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
- trunkJavacWarnings=`$GREP -o '\[javac\] [0-9]* warning' $PATCH_DIR/trunkJavacWarnings.txt | awk '{total += $2} END {print total}'`
- patchJavacWarnings=`$GREP -o '\[javac\] [0-9]* warning' $PATCH_DIR/patchJavacWarnings.txt | awk '{total += $2} END {print total}'`
- echo "There appear to be $trunkJavacWarnings javac compiler warnings before the patch and $patchJavacWarnings javac compiler warnings after applying the patch."
- if [[ $patchJavacWarnings != "" && $trunkJavacWarnings != "" ]] ; then
- if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 javac. The applied patch generated $patchJavacWarnings javac compiler warnings (more than the trunk's current $trunkJavacWarnings warnings)."
- return 1
- fi
- fi
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 javac. The applied patch does not increase the total number of javac compiler warnings."
- return 0
-}
-
-###############################################################################
-### Check there are no changes in the number of release audit (RAT) warnings
-checkReleaseAuditWarnings () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Determining number of patched release audit warnings."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1
-
- ### Compare trunk and patch release audit warning numbers
- if [[ -f $PATCH_DIR/patchReleaseAuditWarnings.txt ]] ; then
- patchReleaseAuditWarnings=`$GREP -c '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt`
- echo ""
- echo ""
- echo "There appear to be $OK_RELEASEAUDIT_WARNINGS release audit warnings before the patch and $patchReleaseAuditWarnings release audit warnings after applying the patch."
- if [[ $patchReleaseAuditWarnings != "" && $OK_RELEASEAUDIT_WARNINGS != "" ]] ; then
- if [[ $patchReleaseAuditWarnings -gt $OK_RELEASEAUDIT_WARNINGS ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 release audit. The applied patch generated $patchReleaseAuditWarnings release audit warnings (more than the trunk's current $OK_RELEASEAUDIT_WARNINGS warnings)."
- $GREP '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt > $PATCH_DIR/patchReleaseAuditProblems.txt
- echo "Lines that start with ????? in the release audit report indicate files that do not have an Apache license header." >> $PATCH_DIR/patchReleaseAuditProblems.txt
- JIRA_COMMENT_FOOTER="Release audit warnings: $BUILD_URL/artifact/trunk/patchprocess/patchReleaseAuditProblems.txt
-$JIRA_COMMENT_FOOTER"
- return 1
- fi
- fi
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 release audit. The applied patch does not increase the total number of release audit warnings."
- return 0
-}
-
-###############################################################################
-### Check there are no changes in the number of Checkstyle warnings
-checkStyle () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Determining number of patched checkstyle warnings."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- echo "THIS IS NOT IMPLEMENTED YET"
- echo ""
- echo ""
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle
- JIRA_COMMENT_FOOTER="Checkstyle results: $BUILD_URL/artifact/trunk/build/test/checkstyle-errors.html
-$JIRA_COMMENT_FOOTER"
- ### TODO: calculate actual patchStyleErrors
-# patchStyleErrors=0
-# if [[ $patchStyleErrors != 0 ]] ; then
-# JIRA_COMMENT="$JIRA_COMMENT
-#
-# -1 checkstyle. The patch generated $patchStyleErrors code style errors."
-# return 1
-# fi
-# JIRA_COMMENT="$JIRA_COMMENT
-#
-# +1 checkstyle. The patch generated 0 code style errors."
- return 0
-}
-
-###############################################################################
-### Check there are no changes in the number of Findbugs warnings
-checkFindbugsWarnings () {
- findbugs_version=`${FINDBUGS_HOME}/bin/findbugs -version`
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Determining number of patched Findbugs warnings."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=$FINDBUGS_HOME -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=${FINDBUGS_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs
- if [ $? != 0 ] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 findbugs. The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
- return 1
- fi
-JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/build/test/findbugs/newPatchFindbugsWarnings.html
-$JIRA_COMMENT_FOOTER"
- cp $BASEDIR/build/test/findbugs/*.xml $PATCH_DIR/patchFindbugsWarnings.xml
- $FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \
- $PATCH_DIR/patchFindbugsWarnings.xml \
- $PATCH_DIR/patchFindbugsWarnings.xml
- findbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/patchFindbugsWarnings.xml \
- $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml | /usr/bin/awk '{print $1}'`
- $FINDBUGS_HOME/bin/convertXmlToText -html \
- $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml \
- $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.html
- cp $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.html $PATCH_DIR/newPatchFindbugsWarnings.html
- cp $BASEDIR/build/test/findbugs/newPatchFindbugsWarnings.xml $PATCH_DIR/newPatchFindbugsWarnings.xml
-
- ### if current warnings greater than OK_FINDBUGS_WARNINGS
- if [[ $findbugsWarnings > $OK_FINDBUGS_WARNINGS ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 findbugs. The patch appears to introduce `expr $(($findbugsWarnings-$OK_FINDBUGS_WARNINGS))` new Findbugs (version ${findbugs_version}) warnings."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 findbugs. The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings."
- return 0
-}
-
-###############################################################################
-### Run the test-core target
-runCoreTests () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Running core tests."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
-
- ### Kill any rogue build processes from the last attempt
- $PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
- PreTestTarget=""
- if [[ $defect == MAPREDUCE-* ]] ; then
- PreTestTarget="create-c++-configure"
- fi
-
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME $PreTestTarget test-core"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME $PreTestTarget test-core
- if [[ $? != 0 ]] ; then
- ### Find and format names of failed tests
- failed_tests=`grep -l -E " /dev/null
-
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib
- if [[ $? != 0 ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 contrib tests. The patch failed contrib unit tests."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 contrib tests. The patch passed contrib unit tests."
- return 0
-}
-
-###############################################################################
-### Run the inject-system-faults target
-checkInjectSystemFaults () {
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Checking the integrity of system test framework code."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
-
- ### Kill any rogue build processes from the last attempt
- $PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
-
- echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults"
- $ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults
- if [[ $? != 0 ]] ; then
- JIRA_COMMENT="$JIRA_COMMENT
-
- -1 system test framework. The patch failed system test framework compile."
- return 1
- fi
- JIRA_COMMENT="$JIRA_COMMENT
-
- +1 system test framework. The patch passed system test framework compile."
- return 0
-}
-
-###############################################################################
-### Submit a comment to the defect's Jira
-submitJiraComment () {
- local result=$1
- ### Do not output the value of JIRA_COMMENT_FOOTER when run by a developer
- if [[ $HUDSON == "false" ]] ; then
- JIRA_COMMENT_FOOTER=""
- fi
- if [[ $result == 0 ]] ; then
- comment="+1 overall. $JIRA_COMMENT
-
-$JIRA_COMMENT_FOOTER"
- else
- comment="-1 overall. $JIRA_COMMENT
-
-$JIRA_COMMENT_FOOTER"
- fi
- ### Output the test result to the console
- echo "
-
-
-
-$comment"
-
- if [[ $HUDSON == "true" ]] ; then
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Adding comment to Jira."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- ### Update Jira with a comment
- export USER=hudson
- $JIRACLI -s https://issues.apache.org/jira -a addcomment -u hadoopqa -p $JIRA_PASSWD --comment "$comment" --issue $defect
- $JIRACLI -s https://issues.apache.org/jira -a logout -u hadoopqa -p $JIRA_PASSWD
- fi
-}
-
-###############################################################################
-### Cleanup files
-cleanupAndExit () {
- local result=$1
- if [[ $HUDSON == "true" ]] ; then
- if [ -e "$PATCH_DIR" ] ; then
- mv $PATCH_DIR $BASEDIR
- fi
- fi
- echo ""
- echo ""
- echo "======================================================================"
- echo "======================================================================"
- echo " Finished build."
- echo "======================================================================"
- echo "======================================================================"
- echo ""
- echo ""
- exit $result
-}
-
-###############################################################################
-###############################################################################
-###############################################################################
-
-JIRA_COMMENT=""
-JIRA_COMMENT_FOOTER="Console output: $BUILD_URL/console
-
-This message is automatically generated."
-
-### Check if arguments to the script have been specified properly or not
-parseArgs $@
-cd $BASEDIR
-
-checkout
-RESULT=$?
-if [[ $HUDSON == "true" ]] ; then
- if [[ $RESULT != 0 ]] ; then
- exit 100
- fi
-fi
-setup
-checkAuthor
-RESULT=$?
-
-if [[ $HUDSON == "true" ]] ; then
- cleanUpXml
-fi
-checkTests
-(( RESULT = RESULT + $? ))
-applyPatch
-if [[ $? != 0 ]] ; then
- submitJiraComment 1
- cleanupAndExit 1
-fi
-checkJavadocWarnings
-(( RESULT = RESULT + $? ))
-checkJavacWarnings
-(( RESULT = RESULT + $? ))
-### Checkstyle not implemented yet
-#checkStyle
-#(( RESULT = RESULT + $? ))
-checkFindbugsWarnings
-(( RESULT = RESULT + $? ))
-checkReleaseAuditWarnings
-(( RESULT = RESULT + $? ))
-### Do not call these when run by a developer
-if [[ $HUDSON == "true" ]] ; then
- runCoreTests
- (( RESULT = RESULT + $? ))
- runContribTests
- (( RESULT = RESULT + $? ))
-fi
-checkInjectSystemFaults
-(( RESULT = RESULT + $? ))
-JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
-$JIRA_COMMENT_FOOTER"
-
-submitJiraComment $RESULT
-cleanupAndExit $RESULT
diff --git a/dev-support/test-patch.sh b/dev-support/test-patch.sh
index 6325e6a193b..1ad507a326e 100755
--- a/dev-support/test-patch.sh
+++ b/dev-support/test-patch.sh
@@ -597,20 +597,23 @@ runTests () {
echo "======================================================================"
echo ""
echo ""
-
- echo "$MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess"
- $MVN clean install test -Pnative -D${PROJECT_NAME}PatchProcess
+
+ echo "$MVN clean install -Pnative -D${PROJECT_NAME}PatchProcess"
+ $MVN clean install -Pnative -D${PROJECT_NAME}PatchProcess
if [[ $? != 0 ]] ; then
### Find and format names of failed tests
failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "
+<assembly xmlns="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0"
+  xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+  xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
+  <id>hadoop-mapreduce-dist</id>
+  <formats>
+    <format>dir</format>
+  </formats>
+  <includeBaseDirectory>false</includeBaseDirectory>
+  <fileSets>
+    <fileSet>
+      <directory>hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/target/native/target/usr/local/bin</directory>
+      <outputDirectory>bin</outputDirectory>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>hadoop-yarn/bin</directory>
+      <outputDirectory>bin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>bin</directory>
+      <outputDirectory>bin</outputDirectory>
+      <includes>
+        <include>*</include>
+      </includes>
+      <fileMode>0755</fileMode>
+    </fileSet>
+    <fileSet>
+      <directory>hadoop-yarn/conf</directory>
+      <outputDirectory>conf</outputDirectory>
+      <includes>
+        <include>**/*</include>
+      </includes>
+    </fileSet>
+  </fileSets>
+  <moduleSets>
+    <moduleSet>
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-yarn-server-tests</exclude>
+      </excludes>
+      <binaries>
+        <outputDirectory>modules</outputDirectory>
+        <includeDependencies>false</includeDependencies>
+        <unpack>false</unpack>
+      </binaries>
+    </moduleSet>
+  </moduleSets>
+  <dependencySets>
+    <dependencySet>
+      <useProjectArtifact>false</useProjectArtifact>
+      <outputDirectory>/lib</outputDirectory>
+      <excludes>
+        <exclude>org.apache.hadoop:hadoop-common</exclude>
+        <exclude>org.apache.hadoop:hadoop-hdfs</exclude>
+      </excludes>
+    </dependencySet>
+  </dependencySets>
+</assembly>
diff --git a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
index 1829f22ea4c..fd03bfd68da 100644
--- a/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
+++ b/hadoop-assemblies/src/main/resources/assemblies/hadoop-src.xml
@@ -19,18 +19,29 @@
xsi:schemaLocation="http://maven.apache.org/plugins/maven-assembly-plugin/assembly/1.1.0 http://maven.apache.org/xsd/assembly-1.1.0.xsd">
   <id>hadoop-src</id>
   <formats>
-    <format>dir</format>
+    <format>tar.gz</format>
   </formats>
-  <includeBaseDirectory>false</includeBaseDirectory>
+  <includeBaseDirectory>true</includeBaseDirectory>
   <fileSets>
     <fileSet>
-      <directory>${project.basedir}</directory>
-      <outputDirectory>src/</outputDirectory>
+      <directory>.</directory>
+      <useDefaultExcludes>true</useDefaultExcludes>
       <excludes>
+        <exclude>.git/**</exclude>
+        <exclude>**/.gitignore</exclude>
+        <exclude>**/.svn</exclude>
+        <exclude>**/*.iws</exclude>
+        <exclude>**/*.ipr</exclude>
+        <exclude>**/*.iml</exclude>
+        <exclude>**/.classpath</exclude>
+        <exclude>**/.project</exclude>
+        <exclude>**/.settings</exclude>
+        <exclude>**/target/**</exclude>
         <exclude>**/*.log</exclude>
         <exclude>**/build/**</exclude>
-        <exclude>**/target/**</exclude>
+        <exclude>**/file:/**</exclude>
+        <exclude>**/SecurityAuth.audit*</exclude>
       </excludes>
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsStandardDoclet.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsStandardDoclet.java
new file mode 100644
index 00000000000..10d554d07b5
--- /dev/null
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/IncludePublicAnnotationsStandardDoclet.java
@@ -0,0 +1,63 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.classification.tools;
+
+import com.sun.javadoc.DocErrorReporter;
+import com.sun.javadoc.LanguageVersion;
+import com.sun.javadoc.RootDoc;
+import com.sun.tools.doclets.standard.Standard;
+
+/**
+ * A Doclet
+ * that only includes class-level elements that are annotated with
+ * {@link org.apache.hadoop.classification.InterfaceAudience.Public}.
+ * Class-level elements with no annotation are excluded.
+ * In addition, all elements that are annotated with
+ * {@link org.apache.hadoop.classification.InterfaceAudience.Private} or
+ * {@link org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate}
+ * are also excluded.
+ * It delegates to the Standard Doclet, and takes the same options.
+ */
+public class IncludePublicAnnotationsStandardDoclet {
+
+ public static LanguageVersion languageVersion() {
+ return LanguageVersion.JAVA_1_5;
+ }
+
+ public static boolean start(RootDoc root) {
+ System.out.println(
+ IncludePublicAnnotationsStandardDoclet.class.getSimpleName());
+ RootDocProcessor.treatUnannotatedClassesAsPrivate = true;
+ return Standard.start(RootDocProcessor.process(root));
+ }
+
+ public static int optionLength(String option) {
+ Integer length = StabilityOptions.optionLength(option);
+ if (length != null) {
+ return length;
+ }
+ return Standard.optionLength(option);
+ }
+
+ public static boolean validOptions(String[][] options,
+ DocErrorReporter reporter) {
+ StabilityOptions.validOptions(options, reporter);
+ String[][] filteredOptions = StabilityOptions.filterOptions(options);
+ return Standard.validOptions(filteredOptions, reporter);
+ }
+}
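
As a rough sketch (not part of this patch), the IncludePublicAnnotationsStandardDoclet above can be handed to the standard javadoc tool through its usual -doclet/-docletpath options; the docletpath jar name and the org.example.api package below are placeholders:

  $ javadoc -doclet org.apache.hadoop.classification.tools.IncludePublicAnnotationsStandardDoclet \
      -docletpath hadoop-annotations.jar \
      -sourcepath src/main/java org.example.api
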
diff --git a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
index 5df42c2ef5f..2783bf3b308 100644
--- a/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
+++ b/hadoop-common-project/hadoop-annotations/src/main/java/org/apache/hadoop/classification/tools/RootDocProcessor.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.classification.InterfaceStability;
class RootDocProcessor {
static String stability = StabilityOptions.UNSTABLE_OPTION;
+ static boolean treatUnannotatedClassesAsPrivate = false;
public static RootDoc process(RootDoc root) {
return (RootDoc) process(root, RootDoc.class);
@@ -201,6 +202,17 @@ class RootDocProcessor {
}
}
}
+ for (AnnotationDesc annotation : annotations) {
+ String qualifiedTypeName =
+ annotation.annotationType().qualifiedTypeName();
+ if (qualifiedTypeName.equals(
+ InterfaceAudience.Public.class.getCanonicalName())) {
+ return false;
+ }
+ }
+ }
+ if (treatUnannotatedClassesAsPrivate) {
+ return doc.isClass() || doc.isInterface() || doc.isAnnotationType();
}
return false;
}
diff --git a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
index 121d96628ba..38b51cbaa75 100644
--- a/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
+++ b/hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
@@ -151,15 +151,13 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
throw new ServletException("Keytab does not exist: " + keytab);
}
- String nameRules = config.getProperty(NAME_RULES, "DEFAULT");
- KerberosName.setRules(nameRules);
-
Set<Principal> principals = new HashSet<Principal>();
principals.add(new KerberosPrincipal(principal));
Subject subject = new Subject(false, principals, new HashSet<Object>(), new HashSet<Object>());
+ * Having a prioritised queue allows the {@link BlockManager} to select
+ * which blocks to replicate first - it tries to give priority to data
+ * that is most at risk or considered most valuable.
+ *
+ * <p/>
+ * The policy for choosing which priority to give added blocks
+ * is implemented in {@link #getPriority(Block, int, int, int)}.
+ * </p>
+ * <p>The queue order is as follows:</p>
+ * <ol>
+ *   <li>{@link #QUEUE_HIGHEST_PRIORITY}: the blocks that must be replicated
+ *   first. That is blocks with only one copy, or blocks with zero live
+ *   copies but a copy in a node being decommissioned. These blocks
+ *   are at risk of loss if the disk or server on which they
+ *   remain fails.</li>
+ *   <li>{@link #QUEUE_VERY_UNDER_REPLICATED}: blocks that are very
+ *   under-replicated compared to their expected values. Currently
+ *   that means the ratio of actual:expected replicas is
+ *   <i>less than 1:3</i>. These blocks may not be at risk,
+ *   but they are clearly considered "important".</li>
+ *   <li>{@link #QUEUE_UNDER_REPLICATED}: blocks that are also under
+ *   replicated, and the ratio of actual:expected is good enough that
+ *   they do not need to go into the {@link #QUEUE_VERY_UNDER_REPLICATED}
+ *   queue.</li>
+ *   <li>{@link #QUEUE_REPLICAS_BADLY_DISTRIBUTED}: there are at least as
+ *   many copies of a block as required, but the blocks are not adequately
+ *   distributed. Loss of a rack/switch could take all copies off-line.</li>
+ *   <li>{@link #QUEUE_WITH_CORRUPT_BLOCKS} This is for blocks that are corrupt
+ *   and for which there are no non-corrupt copies (currently) available.
+ *   The policy here is to keep those corrupt blocks replicated, but give
+ *   blocks that are not corrupt higher priority.</li>
+ * </ol>
*/
class UnderReplicatedBlocks implements Iterable<Block> {
+ /** The total number of queues : {@value} */
static final int LEVEL = 5;
+ /** The queue with the highest priority: {@value} */
+ static final int QUEUE_HIGHEST_PRIORITY = 0;
+ /** The queue for blocks that are way below their expected value : {@value} */
+ static final int QUEUE_VERY_UNDER_REPLICATED = 1;
+ /** The queue for "normally" under-replicated blocks: {@value} */
+ static final int QUEUE_UNDER_REPLICATED = 2;
+ /** The queue for blocks that have the right number of replicas,
+ * but which the block manager felt were badly distributed: {@value}
+ */
+ static final int QUEUE_REPLICAS_BADLY_DISTRIBUTED = 3;
+ /** The queue for corrupt blocks: {@value} */
static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
+ /** the queues themselves */
private final List<NavigableSet<Block>> priorityQueues
-      = new ArrayList<NavigableSet<Block>>();
-
+      = new ArrayList<NavigableSet<Block>>(LEVEL);
+
/** Create an object. */
UnderReplicatedBlocks() {
-    for(int i=0; i<LEVEL; i++) {
+    for (int i = 0; i < LEVEL; i++) {
       priorityQueues.add(new TreeSet<Block>());
}
}
@@ -47,7 +94,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
* Empty the queues.
*/
void clear() {
-    for(int i=0; i<LEVEL; i++) {
+    for (int i = 0; i < LEVEL; i++) {
       priorityQueues.get(i).clear();
     }
   }
/** Return the total number of under replication blocks */
synchronized int size() {
int size = 0;
-    for (int i=0; i<LEVEL; i++) {
+    for (int i = 0; i < LEVEL; i++) {
       size += priorityQueues.get(i).size();
     }
     return size;
   }
/** Return the number of under replication blocks excluding corrupt blocks */
synchronized int getUnderReplicatedBlockCount() {
int size = 0;
- for (int i=0; i {
/** Check if a block is in the neededReplication queue */
synchronized boolean contains(Block block) {
-    for(NavigableSet<Block> set : priorityQueues) {
-      if(set.contains(block)) { return true; }
+    for (NavigableSet<Block> set : priorityQueues) {
+ if (set.contains(block)) {
+ return true;
+ }
}
return false;
}
-
+
/** Return the priority of a block
- * @param block a under replication block
+ * @param block an under replicated block
* @param curReplicas current number of replicas of the block
* @param expectedReplicas expected number of replicas of the block
+ * @return the priority for the blocks, between 0 and ({@link #LEVEL}-1)
*/
- private int getPriority(Block block,
+ private int getPriority(Block block,
int curReplicas,
int decommissionedReplicas,
int expectedReplicas) {
assert curReplicas >= 0 : "Negative replicas!";
if (curReplicas >= expectedReplicas) {
- return 3; // Block doesn't have enough racks
- } else if(curReplicas==0) {
- // If there are zero non-decommissioned replica but there are
+ // Block has enough copies, but not enough racks
+ return QUEUE_REPLICAS_BADLY_DISTRIBUTED;
+ } else if (curReplicas == 0) {
+ // If there are zero non-decommissioned replicas but there are
// some decommissioned replicas, then assign them highest priority
if (decommissionedReplicas > 0) {
- return 0;
+ return QUEUE_HIGHEST_PRIORITY;
}
- return QUEUE_WITH_CORRUPT_BLOCKS; // keep these blocks in needed replication.
- } else if(curReplicas==1) {
- return 0; // highest priority
-    } else if(curReplicas*3<expectedReplicas) {
-      return 1;
-    } else {
-      return 2;
+    } else if (curReplicas * 3 < expectedReplicas) {
+      return QUEUE_VERY_UNDER_REPLICATED;
+    } else {
+      return QUEUE_UNDER_REPLICATED;
     }
   }
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.add:"
+ block
- + " has only "+curReplicas
+ + " has only " + curReplicas
+ " replicas and need " + expectedReplicas
+ " replicas so is added to neededReplications"
+ " at priority level " + priLevel);
@@ -149,8 +209,22 @@ class UnderReplicatedBlocks implements Iterable<Block> {
oldExpectedReplicas);
return remove(block, priLevel);
}
-
- /** remove a block from a under replication queue given a priority*/
+
+ /**
+ * Remove a block from the under replication queues.
+ *
+ * The priLevel parameter is a hint of which queue to query
+ * first: if negative or >= {@link #LEVEL} this shortcutting
+ * is not attempted.
+ *
+ * If the block is not found in the nominated queue, an attempt is made to
+ * remove it from all queues.
+ *
+ * Warning: This is not a synchronized method.
+ * @param block block to remove
+ * @param priLevel expected priority level
+ * @return true if the block was found and removed from one of the priority queues
+ */
boolean remove(Block block, int priLevel) {
if(priLevel >= 0 && priLevel < LEVEL
&& priorityQueues.get(priLevel).remove(block)) {
@@ -164,8 +238,8 @@ class UnderReplicatedBlocks implements Iterable<Block> {
} else {
// Try to remove the block from all queues if the block was
// not found in the queue for the given priority level.
-      for(int i=0; i<LEVEL; i++) {
+      for (int i = 0; i < LEVEL; i++) {
        if (priorityQueues.get(i).remove(block)) {
}
return false;
}
-
- /** update the priority level of a block */
- synchronized void update(Block block, int curReplicas,
+
+ /**
+ * Recalculate and potentially update the priority level of a block.
+ *
+ * If the block priority has changed from before, an attempt is made to
+ * remove it from the block queue. Regardless of whether or not the block
+ * is in the block queue of (recalculated) priority, an attempt is made
+ * to add it to that queue. This ensures that the block will be
+ * in its expected priority queue (and only that queue) by the end of the
+ * method call.
+ * @param block an under replicated block
+ * @param curReplicas current number of replicas of the block
+ * @param decommissionedReplicas the number of decommissioned replicas
+ * @param curExpectedReplicas expected number of replicas of the block
+ * @param curReplicasDelta the change in the replica count from before
+ * @param expectedReplicasDelta the change in the expected replica count from before
+ */
+ synchronized void update(Block block, int curReplicas,
int decommissionedReplicas,
int curExpectedReplicas,
int curReplicasDelta, int expectedReplicasDelta) {
@@ -206,7 +295,7 @@ class UnderReplicatedBlocks implements Iterable<Block> {
NameNode.stateChangeLog.debug(
"BLOCK* NameSystem.UnderReplicationBlock.update:"
+ block
- + " has only "+curReplicas
+ + " has only "+ curReplicas
+ " replicas and needs " + curExpectedReplicas
+ " replicas so is added to neededReplications"
+ " at priority level " + curPri);
@@ -218,17 +307,24 @@ class UnderReplicatedBlocks implements Iterable<Block> {
synchronized BlockIterator iterator(int level) {
return new BlockIterator(level);
}
-
+
/** return an iterator of all the under replication blocks */
+ @Override
public synchronized BlockIterator iterator() {
return new BlockIterator();
}
-
+
+ /**
+ * An iterator over blocks.
+ */
class BlockIterator implements Iterator<Block> {
private int level;
private boolean isIteratorForLevel = false;
private List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>();
+ /**
+ * Construct an iterator over all queues.
+ */
private BlockIterator() {
level=0;
      for(int i=0; i<LEVEL; i++) {
        iterators.add(priorityQueues.get(i).iterator());
}
}
+ /**
+ * Construct an iterator for a single queue level
+ * @param l the priority level to iterate over
+ */
private BlockIterator(int l) {
level = l;
isIteratorForLevel = true;
@@ -243,8 +343,9 @@ class UnderReplicatedBlocks implements Iterable<Block> {
}
private void update() {
- if (isIteratorForLevel)
+ if (isIteratorForLevel) {
return;
+ }
while(level< LEVEL-1 && !iterators.get(level).hasNext()) {
level++;
}
@@ -252,30 +353,33 @@ class UnderReplicatedBlocks implements Iterable<Block> {
@Override
public Block next() {
- if (isIteratorForLevel)
+ if (isIteratorForLevel) {
return iterators.get(0).next();
+ }
update();
return iterators.get(level).next();
}
@Override
public boolean hasNext() {
- if (isIteratorForLevel)
+ if (isIteratorForLevel) {
return iterators.get(0).hasNext();
+ }
update();
return iterators.get(level).hasNext();
}
@Override
public void remove() {
- if (isIteratorForLevel)
+ if (isIteratorForLevel) {
iterators.get(0).remove();
- else
+ } else {
iterators.get(level).remove();
+ }
}
int getPriority() {
return level;
}
- }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
index 67f67c03958..8ae1390ed8d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
@@ -53,6 +53,7 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.namenode.NameNodeHttpServer;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
@@ -552,6 +553,13 @@ public class JspHelper {
DataInputStream in = new DataInputStream(buf);
DelegationTokenIdentifier id = new DelegationTokenIdentifier();
id.readFields(in);
+ if (context != null) {
+ final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
+ if (nn != null) {
+ // Verify the token.
+ nn.getNamesystem().verifyToken(id, token.getPassword());
+ }
+ }
ugi = id.getUser();
checkUsername(ugi.getShortUserName(), usernameFromQuery);
checkUsername(ugi.getShortUserName(), user);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
index 2a53b3dd78a..579eb8ed1a4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
@@ -403,8 +403,8 @@ class BlockPoolSliceScanner {
try {
adjustThrottler();
- blockSender = new BlockSender(block, 0, -1, false, false, true,
- datanode, null);
+ blockSender = new BlockSender(block, 0, -1, false, true, datanode,
+ null);
DataOutputStream out =
new DataOutputStream(new IOUtils.NullOutputStream());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
index 50e118aaa00..94920fd5bc3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
@@ -24,6 +24,7 @@ import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.EOFException;
+import java.io.FileDescriptor;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
@@ -46,6 +47,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.PureJavaCrc32;
@@ -57,10 +59,13 @@ import org.apache.hadoop.util.PureJavaCrc32;
class BlockReceiver implements Closeable {
public static final Log LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
+
+ private static final long CACHE_DROP_LAG_BYTES = 8 * 1024 * 1024;
private DataInputStream in = null; // from where data are read
private DataChecksum checksum; // from where chunks of a block can be read
private OutputStream out = null; // to block file at local disk
+ private FileDescriptor outFd;
private OutputStream cout = null; // output stream for cehcksum file
private DataOutputStream checksumOut = null; // to crc file at local disk
private int bytesPerChecksum;
@@ -80,6 +85,11 @@ class BlockReceiver implements Closeable {
private final DataNode datanode;
volatile private boolean mirrorError;
+ // Cache management state
+ private boolean dropCacheBehindWrites;
+ private boolean syncBehindWrites;
+ private long lastCacheDropOffset = 0;
+
/** The client name. It is empty if a datanode is the client */
private final String clientname;
private final boolean isClient;
@@ -98,7 +108,8 @@ class BlockReceiver implements Closeable {
final BlockConstructionStage stage,
final long newGs, final long minBytesRcvd, final long maxBytesRcvd,
final String clientname, final DatanodeInfo srcDataNode,
- final DataNode datanode) throws IOException {
+ final DataNode datanode, DataChecksum requestedChecksum)
+ throws IOException {
try{
this.block = block;
this.in = in;
@@ -167,9 +178,11 @@ class BlockReceiver implements Closeable {
}
}
// read checksum meta information
- this.checksum = DataChecksum.newDataChecksum(in);
+ this.checksum = requestedChecksum;
this.bytesPerChecksum = checksum.getBytesPerChecksum();
this.checksumSize = checksum.getChecksumSize();
+ this.dropCacheBehindWrites = datanode.shouldDropCacheBehindWrites();
+ this.syncBehindWrites = datanode.shouldSyncBehindWrites();
final boolean isCreate = isDatanode || isTransfer
|| stage == BlockConstructionStage.PIPELINE_SETUP_CREATE;
@@ -177,6 +190,12 @@ class BlockReceiver implements Closeable {
this.bytesPerChecksum, this.checksumSize);
if (streams != null) {
this.out = streams.dataOut;
+ if (out instanceof FileOutputStream) {
+ this.outFd = ((FileOutputStream)out).getFD();
+ } else {
+ LOG.warn("Could not get file descriptor for outputstream of class " +
+ out.getClass());
+ }
this.cout = streams.checksumOut;
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
streams.checksumOut, HdfsConstants.SMALL_BUFFER_SIZE));
@@ -631,6 +650,8 @@ class BlockReceiver implements Closeable {
);
datanode.metrics.incrBytesWritten(len);
+
+ dropOsCacheBehindWriter(offsetInBlock);
}
} catch (IOException iex) {
datanode.checkDiskError(iex);
@@ -645,10 +666,27 @@ class BlockReceiver implements Closeable {
return lastPacketInBlock?-1:len;
}
- void writeChecksumHeader(DataOutputStream mirrorOut) throws IOException {
- checksum.writeHeader(mirrorOut);
+ private void dropOsCacheBehindWriter(long offsetInBlock) throws IOException {
+ try {
+ if (outFd != null &&
+ offsetInBlock > lastCacheDropOffset + CACHE_DROP_LAG_BYTES) {
+ long twoWindowsAgo = lastCacheDropOffset - CACHE_DROP_LAG_BYTES;
+ if (twoWindowsAgo > 0 && dropCacheBehindWrites) {
+ NativeIO.posixFadviseIfPossible(outFd, 0, lastCacheDropOffset,
+ NativeIO.POSIX_FADV_DONTNEED);
+ }
+
+ if (syncBehindWrites) {
+ NativeIO.syncFileRangeIfPossible(outFd, lastCacheDropOffset, CACHE_DROP_LAG_BYTES,
+ NativeIO.SYNC_FILE_RANGE_WRITE);
+ }
+
+ lastCacheDropOffset += CACHE_DROP_LAG_BYTES;
+ }
+ } catch (Throwable t) {
+ LOG.warn("Couldn't drop os cache behind writer for " + block, t);
+ }
}
-
void receiveBlock(
DataOutputStream mirrOut, // output to next datanode
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
index 84b38b37e9a..f4168ee1c90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
+import java.io.FileDescriptor;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
@@ -36,6 +37,9 @@ import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.ReadaheadPool;
+import org.apache.hadoop.io.ReadaheadPool.ReadaheadRequest;
+import org.apache.hadoop.io.nativeio.NativeIO;
import org.apache.hadoop.net.SocketOutputStream;
import org.apache.hadoop.util.DataChecksum;
@@ -118,7 +122,9 @@ class BlockSender implements java.io.Closeable {
private DataInputStream checksumIn;
/** Checksum utility */
private final DataChecksum checksum;
- /** Starting position to read */
+ /** Initial position to read */
+ private long initialOffset;
+ /** Current read position */
private long offset;
/** Position of last byte to read from block file */
private final long endOffset;
@@ -128,8 +134,6 @@ class BlockSender implements java.io.Closeable {
private final int checksumSize;
/** If true, failure to read checksum is ignored */
private final boolean corruptChecksumOk;
- /** true if chunk offset is needed to be sent in Checksum header */
- private final boolean chunkOffsetOK;
/** Sequence number of packet being sent */
private long seqno;
/** Set to true if transferTo is allowed for sending data to the client */
@@ -142,6 +146,24 @@ class BlockSender implements java.io.Closeable {
private final String clientTraceFmt;
private volatile ChunkChecksum lastChunkChecksum = null;
+ /** The file descriptor of the block being sent */
+ private FileDescriptor blockInFd;
+
+ // Cache-management related fields
+ private final long readaheadLength;
+ private boolean shouldDropCacheBehindRead;
+ private ReadaheadRequest curReadahead;
+ private long lastCacheDropOffset;
+ private static final long CACHE_DROP_INTERVAL_BYTES = 1024 * 1024; // 1MB
+ /**
+ * Minimum length of read below which management of the OS
+ * buffer cache is disabled.
+ */
+ private static final long LONG_READ_THRESHOLD_BYTES = 256 * 1024;
+
+ private static ReadaheadPool readaheadPool =
+ ReadaheadPool.getInstance();
+
/**
* Constructor
*
@@ -149,22 +171,22 @@ class BlockSender implements java.io.Closeable {
* @param startOffset starting offset to read from
* @param length length of data to read
* @param corruptChecksumOk
- * @param chunkOffsetOK need to send check offset in checksum header
* @param verifyChecksum verify checksum while reading the data
* @param datanode datanode from which the block is being read
* @param clientTraceFmt format string used to print client trace logs
* @throws IOException
*/
BlockSender(ExtendedBlock block, long startOffset, long length,
- boolean corruptChecksumOk, boolean chunkOffsetOK,
- boolean verifyChecksum, DataNode datanode, String clientTraceFmt)
+ boolean corruptChecksumOk, boolean verifyChecksum,
+ DataNode datanode, String clientTraceFmt)
throws IOException {
try {
this.block = block;
- this.chunkOffsetOK = chunkOffsetOK;
this.corruptChecksumOk = corruptChecksumOk;
this.verifyChecksum = verifyChecksum;
this.clientTraceFmt = clientTraceFmt;
+ this.readaheadLength = datanode.getReadaheadLength();
+ this.shouldDropCacheBehindRead = datanode.shouldDropCacheBehindReads();
synchronized(datanode.data) {
this.replica = getReplica(block, datanode);
@@ -277,6 +299,11 @@ class BlockSender implements java.io.Closeable {
DataNode.LOG.debug("replica=" + replica);
}
blockIn = datanode.data.getBlockInputStream(block, offset); // seek to offset
+ if (blockIn instanceof FileInputStream) {
+ blockInFd = ((FileInputStream)blockIn).getFD();
+ } else {
+ blockInFd = null;
+ }
} catch (IOException ioe) {
IOUtils.closeStream(this);
IOUtils.closeStream(blockIn);
@@ -288,6 +315,20 @@ class BlockSender implements java.io.Closeable {
* close opened files.
*/
public void close() throws IOException {
+ if (blockInFd != null && shouldDropCacheBehindRead) {
+ // drop the last few MB of the file from cache
+ try {
+ NativeIO.posixFadviseIfPossible(
+ blockInFd, lastCacheDropOffset, offset - lastCacheDropOffset,
+ NativeIO.POSIX_FADV_DONTNEED);
+ } catch (Exception e) {
+ LOG.warn("Unable to drop cache on file close", e);
+ }
+ }
+ if (curReadahead != null) {
+ curReadahead.cancel();
+ }
+
IOException ioe = null;
if(checksumIn!=null) {
try {
@@ -304,6 +345,7 @@ class BlockSender implements java.io.Closeable {
ioe = e;
}
blockIn = null;
+ blockInFd = null;
}
// throw IOException if there is any
if(ioe!= null) {
@@ -538,14 +580,22 @@ class BlockSender implements java.io.Closeable {
if (out == null) {
throw new IOException( "out stream is null" );
}
- final long initialOffset = offset;
+ initialOffset = offset;
long totalRead = 0;
OutputStream streamForSendChunks = out;
+ lastCacheDropOffset = initialOffset;
+
+ if (isLongRead() && blockInFd != null) {
+ // Advise that this file descriptor will be accessed sequentially.
+ NativeIO.posixFadviseIfPossible(blockInFd, 0, 0, NativeIO.POSIX_FADV_SEQUENTIAL);
+ }
+
+ // Trigger readahead of beginning of file if configured.
+ manageOsCache();
+
final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;
try {
- writeChecksumHeader(out);
-
int maxChunksPerPacket;
int pktSize = PacketHeader.PKT_HEADER_LEN;
boolean transferTo = transferToAllowed && !verifyChecksum
@@ -569,6 +619,7 @@ class BlockSender implements java.io.Closeable {
ByteBuffer pktBuf = ByteBuffer.allocate(pktSize);
while (endOffset > offset) {
+ manageOsCache();
long len = sendPacket(pktBuf, maxChunksPerPacket, streamForSendChunks,
transferTo, throttler);
offset += len;
@@ -595,22 +646,45 @@ class BlockSender implements java.io.Closeable {
}
return totalRead;
}
-
+
/**
- * Write checksum header to the output stream
+ * Manage the OS buffer cache by performing read-ahead
+ * and drop-behind.
*/
- private void writeChecksumHeader(DataOutputStream out) throws IOException {
- try {
- checksum.writeHeader(out);
- if (chunkOffsetOK) {
- out.writeLong(offset);
+ private void manageOsCache() throws IOException {
+ if (!isLongRead() || blockInFd == null) {
+ // don't manage cache manually for short-reads, like
+ // HBase random read workloads.
+ return;
+ }
+
+ // Perform readahead if necessary
+ if (readaheadLength > 0 && readaheadPool != null) {
+ curReadahead = readaheadPool.readaheadStream(
+ clientTraceFmt, blockInFd,
+ offset, readaheadLength, Long.MAX_VALUE,
+ curReadahead);
+ }
+
+ // Drop what we've just read from cache, since we aren't
+ // likely to need it again
+ long nextCacheDropOffset = lastCacheDropOffset + CACHE_DROP_INTERVAL_BYTES;
+ if (shouldDropCacheBehindRead &&
+ offset >= nextCacheDropOffset) {
+ long dropLength = offset - lastCacheDropOffset;
+ if (dropLength >= 1024) {
+ NativeIO.posixFadviseIfPossible(blockInFd,
+ lastCacheDropOffset, dropLength,
+ NativeIO.POSIX_FADV_DONTNEED);
}
- out.flush();
- } catch (IOException e) { //socket error
- throw ioeToSocketException(e);
+ lastCacheDropOffset += CACHE_DROP_INTERVAL_BYTES;
}
}
-
+
+ private boolean isLongRead() {
+ return (endOffset - offset) > LONG_READ_THRESHOLD_BYTES;
+ }
+
/**
* Write packet header into {@code pkt}
*/
@@ -624,4 +698,19 @@ class BlockSender implements java.io.Closeable {
boolean didSendEntireByteRange() {
return sentEntireByteRange;
}
+
+ /**
+ * @return the checksum type that will be used with this block transfer.
+ */
+ DataChecksum getChecksum() {
+ return checksum;
+ }
+
+ /**
+ * @return the offset into the block file where the sender is currently
+ * reading.
+ */
+ long getOffset() {
+ return offset;
+ }
}
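
To make the read-side policy concrete: readahead is requested for every packet while the remaining read is longer than LONG_READ_THRESHOLD_BYTES, and data that has already been sent is dropped from cache in CACHE_DROP_INTERVAL_BYTES steps. A small standalone sketch, not part of the patch, that walks the same offset arithmetic with the ReadaheadPool and NativeIO calls replaced by prints:

    // Illustrative only: follows the offset bookkeeping of BlockSender.manageOsCache().
    public class ReadDropBehindSketch {
        private static final long CACHE_DROP_INTERVAL_BYTES = 1024 * 1024; // 1MB, as in the patch
        private static final long LONG_READ_THRESHOLD_BYTES = 256 * 1024;  // as in the patch

        private long offset;              // current read position
        private final long endOffset;     // position of last byte to read
        private long lastCacheDropOffset; // where the previous drop ended

        ReadDropBehindSketch(long start, long end) {
            this.offset = start;
            this.endOffset = end;
            this.lastCacheDropOffset = start;
        }

        private boolean isLongRead() {
            return (endOffset - offset) > LONG_READ_THRESHOLD_BYTES;
        }

        void manageOsCache() {
            if (!isLongRead()) {
                return; // leave short reads (e.g. HBase random reads) alone
            }
            // The real code issues ReadaheadPool.readaheadStream(...) here.
            long nextCacheDropOffset = lastCacheDropOffset + CACHE_DROP_INTERVAL_BYTES;
            if (offset >= nextCacheDropOffset) {
                long dropLength = offset - lastCacheDropOffset;
                // The real code issues posix_fadvise(DONTNEED) over this range.
                System.out.println("drop " + dropLength + " bytes at " + lastCacheDropOffset);
                lastCacheDropOffset += CACHE_DROP_INTERVAL_BYTES;
            }
        }

        public static void main(String[] args) {
            ReadDropBehindSketch s = new ReadDropBehindSketch(0, 8L * 1024 * 1024);
            for (long sent = 0; sent < 8L * 1024 * 1024; sent += 64 * 1024) {
                s.manageOsCache();
                s.offset += 64 * 1024; // pretend a 64KB packet was sent
            }
        }
    }
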
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
index d4f5bc19f75..d496c6a2cc4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
@@ -104,6 +104,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -123,6 +124,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.DNTransferAckProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeProtocolServerSideTranslatorR23;
import org.apache.hadoop.hdfs.security.token.block.BlockPoolTokenSecretManager;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@ -150,12 +152,16 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolServerSideTranslatorR23;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolTranslatorR23;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeWireProtocol;
import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
import org.apache.hadoop.hdfs.web.resources.Param;
import org.apache.hadoop.http.HttpServer;
@@ -413,6 +419,11 @@ public class DataNode extends Configured
int socketTimeout;
int socketWriteTimeout = 0;
boolean transferToAllowed = true;
+ private boolean dropCacheBehindWrites = false;
+ private boolean syncBehindWrites = false;
+ private boolean dropCacheBehindReads = false;
+ private long readaheadLength = 0;
+
int writePacketSize = 0;
boolean isBlockTokenEnabled;
BlockPoolTokenSecretManager blockPoolTokenSecretManager;
@@ -496,6 +507,20 @@ public class DataNode extends Configured
DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);
this.writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
+
+ this.readaheadLength = conf.getLong(
+ DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
+ DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
+ this.dropCacheBehindWrites = conf.getBoolean(
+ DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
+ DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT);
+ this.syncBehindWrites = conf.getBoolean(
+ DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
+ DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
+ this.dropCacheBehindReads = conf.getBoolean(
+ DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
+ DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
+
this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
this.initialBlockReportDelay = conf.getLong(
@@ -554,7 +579,7 @@ public class DataNode extends Configured
if (conf.getBoolean(DFS_WEBHDFS_ENABLED_KEY, DFS_WEBHDFS_ENABLED_DEFAULT)) {
infoServer.addJerseyResourcePackage(DatanodeWebHdfsMethods.class
.getPackage().getName() + ";" + Param.class.getPackage().getName(),
- "/" + WebHdfsFileSystem.PATH_PREFIX + "/*");
+ WebHdfsFileSystem.PATH_PREFIX + "/*");
}
this.infoServer.start();
}
@@ -576,13 +601,22 @@ public class DataNode extends Configured
InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
conf.get("dfs.datanode.ipc.address"));
- // Add all the RPC protocols that the Datanode implements
- ipcServer = RPC.getServer(ClientDatanodeProtocol.class, this, ipcAddr.getHostName(),
+ // Add all the RPC protocols that the Datanode implements
+ ClientDatanodeProtocolServerSideTranslatorR23
+ clientDatanodeProtocolServerTranslator =
+ new ClientDatanodeProtocolServerSideTranslatorR23(this);
+ ipcServer = RPC.getServer(
+ org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class,
+ clientDatanodeProtocolServerTranslator, ipcAddr.getHostName(),
ipcAddr.getPort(),
conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
DFS_DATANODE_HANDLER_COUNT_DEFAULT),
false, conf, blockPoolTokenSecretManager);
- ipcServer.addProtocol(InterDatanodeProtocol.class, this);
+ InterDatanodeProtocolServerSideTranslatorR23
+ interDatanodeProtocolServerTranslator =
+ new InterDatanodeProtocolServerSideTranslatorR23(this);
+ ipcServer.addProtocol(InterDatanodeWireProtocol.class,
+ interDatanodeProtocolServerTranslator);
// set service-level authorization security policy
if (conf.getBoolean(
@@ -1137,8 +1171,15 @@ public class DataNode extends Configured
if (!heartbeatsDisabledForTests) {
DatanodeCommand[] cmds = sendHeartBeat();
metrics.addHeartbeat(now() - startTime);
+
+ long startProcessCommands = now();
if (!processCommand(cmds))
continue;
+ long endProcessCommands = now();
+ if (endProcessCommands - startProcessCommands > 2000) {
+ LOG.info("Took " + (endProcessCommands - startProcessCommands) +
+ "ms to process " + cmds.length + " commands from NN");
+ }
}
}
if (pendingReceivedRequests > 0
@@ -1412,7 +1453,7 @@ public class DataNode extends Configured
}
break;
case DatanodeProtocol.DNA_FINALIZE:
- storage.finalizeUpgrade(((DatanodeCommand.Finalize) cmd)
+ storage.finalizeUpgrade(((FinalizeCommand) cmd)
.getBlockPoolId());
break;
case UpgradeCommand.UC_ACTION_START_UPGRADE:
@@ -1634,15 +1675,13 @@ public class DataNode extends Configured
if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
InterDatanodeProtocol.LOG.debug("InterDatanodeProtocol addr=" + addr);
}
- UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
+ final UserGroupInformation loginUgi = UserGroupInformation.getLoginUser();
try {
return loginUgi
.doAs(new PrivilegedExceptionAction() {
public InterDatanodeProtocol run() throws IOException {
- return (InterDatanodeProtocol) RPC.getProxy(
- InterDatanodeProtocol.class, InterDatanodeProtocol.versionID,
- addr, UserGroupInformation.getCurrentUser(), conf,
- NetUtils.getDefaultSocketFactory(conf), socketTimeout);
+ return new InterDatanodeProtocolTranslatorR23(addr, loginUgi,
+ conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
});
} catch (InterruptedException ie) {
@@ -1878,7 +1917,7 @@ public class DataNode extends Configured
nn.reportBadBlocks(new LocatedBlock[]{
new LocatedBlock(block, new DatanodeInfo[] {
new DatanodeInfo(bpReg)})});
- LOG.info("Can't replicate block " + block
+ LOG.warn("Can't replicate block " + block
+ " because on-disk length " + onDiskLength
+ " is shorter than NameNode recorded length " + block.getNumBytes());
return;
@@ -2058,7 +2097,7 @@ public class DataNode extends Configured
out = new DataOutputStream(new BufferedOutputStream(baseStream,
HdfsConstants.SMALL_BUFFER_SIZE));
blockSender = new BlockSender(b, 0, b.getNumBytes(),
- false, false, false, DataNode.this, null);
+ false, false, DataNode.this, null);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);
//
@@ -2071,7 +2110,7 @@ public class DataNode extends Configured
}
new Sender(out).writeBlock(b, accessToken, clientname, targets, srcNode,
- stage, 0, 0, 0, 0);
+ stage, 0, 0, 0, 0, blockSender.getChecksum());
// send data & checksum
blockSender.sendBlock(out, baseStream, null);
@@ -2884,4 +2923,20 @@ public class DataNode extends Configured
(DataXceiverServer) this.dataXceiverServer.getRunnable();
return dxcs.balanceThrottler.getBandwidth();
}
+
+ long getReadaheadLength() {
+ return readaheadLength;
+ }
+
+ boolean shouldDropCacheBehindWrites() {
+ return dropCacheBehindWrites;
+ }
+
+ boolean shouldDropCacheBehindReads() {
+ return dropCacheBehindReads;
+ }
+
+ boolean shouldSyncBehindWrites() {
+ return syncBehindWrites;
+ }
}
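
The four fields cached above are plain configuration reads done once at startup. A minimal sketch, not part of the patch, that reads the same tunables; it assumes hadoop-common and hadoop-hdfs (with the DFSConfigKeys constants added elsewhere in this patch) are on the classpath.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;
    import org.apache.hadoop.hdfs.HdfsConfiguration;

    // Illustrative only: reads the cache-management tunables the way DataNode does.
    public class CacheTuningSketch {
        public static void main(String[] args) {
            Configuration conf = new HdfsConfiguration();
            long readaheadLength = conf.getLong(
                DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_KEY,
                DFSConfigKeys.DFS_DATANODE_READAHEAD_BYTES_DEFAULT);
            boolean dropBehindWrites = conf.getBoolean(
                DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_KEY,
                DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_WRITES_DEFAULT);
            boolean syncBehindWrites = conf.getBoolean(
                DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_KEY,
                DFSConfigKeys.DFS_DATANODE_SYNC_BEHIND_WRITES_DEFAULT);
            boolean dropBehindReads = conf.getBoolean(
                DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_KEY,
                DFSConfigKeys.DFS_DATANODE_DROP_CACHE_BEHIND_READS_DEFAULT);
            System.out.println("readahead=" + readaheadLength
                + " dropBehindWrites=" + dropBehindWrites
                + " syncBehindWrites=" + syncBehindWrites
                + " dropBehindReads=" + dropBehindReads);
        }
    }
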
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
index fdcdc18a341..d6a3963c0b1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
@@ -44,12 +44,16 @@ import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.Receiver;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProtoOrBuilder;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ClientReadStatusProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.OpBlockChecksumResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
@@ -92,7 +96,6 @@ class DataXceiver extends Receiver implements Runnable {
this.isLocal = s.getInetAddress().equals(s.getLocalAddress());
this.datanode = datanode;
this.dataXceiverServer = dataXceiverServer;
- dataXceiverServer.childSockets.put(s, s);
remoteAddress = s.getRemoteSocketAddress().toString();
localAddress = s.getLocalSocketAddress().toString();
@@ -129,6 +132,7 @@ class DataXceiver extends Receiver implements Runnable {
public void run() {
int opsProcessed = 0;
Op op = null;
+ dataXceiverServer.childSockets.put(s, s);
try {
int stdTimeout = s.getSoTimeout();
@@ -223,15 +227,17 @@ class DataXceiver extends Receiver implements Runnable {
try {
try {
blockSender = new BlockSender(block, blockOffset, length,
- true, true, false, datanode, clientTraceFmt);
+ true, false, datanode, clientTraceFmt);
} catch(IOException e) {
- LOG.info("opReadBlock " + block + " received exception " + e);
- sendResponse(s, ERROR, datanode.socketWriteTimeout);
+ String msg = "opReadBlock " + block + " received exception " + e;
+ LOG.info(msg);
+ sendResponse(s, ERROR, msg, datanode.socketWriteTimeout);
throw e;
}
// send op status
- sendResponse(s, SUCCESS, datanode.socketWriteTimeout);
+ writeSuccessWithChecksumInfo(blockSender,
+ getStreamWithTimeout(s, datanode.socketWriteTimeout));
long read = blockSender.sendBlock(out, baseStream, null); // send data
@@ -289,7 +295,8 @@ class DataXceiver extends Receiver implements Runnable {
final int pipelineSize,
final long minBytesRcvd,
final long maxBytesRcvd,
- final long latestGenerationStamp) throws IOException {
+ final long latestGenerationStamp,
+ DataChecksum requestedChecksum) throws IOException {
updateCurrentThreadName("Receiving block " + block + " client=" + clientname);
final boolean isDatanode = clientname.length() == 0;
final boolean isClient = !isDatanode;
@@ -348,7 +355,7 @@ class DataXceiver extends Receiver implements Runnable {
s.getRemoteSocketAddress().toString(),
s.getLocalSocketAddress().toString(),
stage, latestGenerationStamp, minBytesRcvd, maxBytesRcvd,
- clientname, srcDataNode, datanode);
+ clientname, srcDataNode, datanode, requestedChecksum);
} else {
datanode.data.recoverClose(block, latestGenerationStamp, minBytesRcvd);
}
@@ -378,11 +385,8 @@ class DataXceiver extends Receiver implements Runnable {
new Sender(mirrorOut).writeBlock(originalBlock, blockToken,
clientname, targets, srcDataNode, stage, pipelineSize,
- minBytesRcvd, maxBytesRcvd, latestGenerationStamp);
+ minBytesRcvd, maxBytesRcvd, latestGenerationStamp, requestedChecksum);
- if (blockReceiver != null) { // send checksum header
- blockReceiver.writeChecksumHeader(mirrorOut);
- }
mirrorOut.flush();
// read connect ack (only for clients, not for replication req)
@@ -452,7 +456,7 @@ class DataXceiver extends Receiver implements Runnable {
if (LOG.isTraceEnabled()) {
LOG.trace("TRANSFER: send close-ack");
}
- writeResponse(SUCCESS, replyOut);
+ writeResponse(SUCCESS, null, replyOut);
}
}
@@ -507,7 +511,7 @@ class DataXceiver extends Receiver implements Runnable {
NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
try {
datanode.transferReplicaForPipelineRecovery(blk, targets, clientName);
- writeResponse(Status.SUCCESS, out);
+ writeResponse(Status.SUCCESS, null, out);
} finally {
IOUtils.closeStream(out);
}
@@ -577,16 +581,17 @@ class DataXceiver extends Receiver implements Runnable {
LOG.warn("Invalid access token in request from " + remoteAddress
+ " for OP_COPY_BLOCK for block " + block + " : "
+ e.getLocalizedMessage());
- sendResponse(s, ERROR_ACCESS_TOKEN, datanode.socketWriteTimeout);
+ sendResponse(s, ERROR_ACCESS_TOKEN, "Invalid access token", datanode.socketWriteTimeout);
return;
}
}
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
- LOG.info("Not able to copy block " + block.getBlockId() + " to "
- + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
- sendResponse(s, ERROR, datanode.socketWriteTimeout);
+ String msg = "Not able to copy block " + block.getBlockId() + " to "
+ + s.getRemoteSocketAddress() + " because the thread quota is exceeded.";
+ LOG.info(msg);
+ sendResponse(s, ERROR, msg, datanode.socketWriteTimeout);
return;
}
@@ -596,8 +601,8 @@ class DataXceiver extends Receiver implements Runnable {
try {
// check if the block exists or not
- blockSender = new BlockSender(block, 0, -1, false, false, false,
- datanode, null);
+ blockSender = new BlockSender(block, 0, -1, false, false, datanode,
+ null);
// set up response stream
OutputStream baseStream = NetUtils.getOutputStream(
@@ -606,7 +611,7 @@ class DataXceiver extends Receiver implements Runnable {
baseStream, HdfsConstants.SMALL_BUFFER_SIZE));
// send status first
- writeResponse(SUCCESS, reply);
+ writeSuccessWithChecksumInfo(blockSender, reply);
// send block content to the target
long read = blockSender.sendBlock(reply, baseStream,
dataXceiverServer.balanceThrottler);
@@ -653,21 +658,24 @@ class DataXceiver extends Receiver implements Runnable {
LOG.warn("Invalid access token in request from " + remoteAddress
+ " for OP_REPLACE_BLOCK for block " + block + " : "
+ e.getLocalizedMessage());
- sendResponse(s, ERROR_ACCESS_TOKEN, datanode.socketWriteTimeout);
+ sendResponse(s, ERROR_ACCESS_TOKEN, "Invalid access token",
+ datanode.socketWriteTimeout);
return;
}
}
if (!dataXceiverServer.balanceThrottler.acquire()) { // not able to start
- LOG.warn("Not able to receive block " + block.getBlockId() + " from "
- + s.getRemoteSocketAddress() + " because threads quota is exceeded.");
- sendResponse(s, ERROR, datanode.socketWriteTimeout);
+ String msg = "Not able to receive block " + block.getBlockId() + " from "
+ + s.getRemoteSocketAddress() + " because the thread quota is exceeded.";
+ LOG.warn(msg);
+ sendResponse(s, ERROR, msg, datanode.socketWriteTimeout);
return;
}
Socket proxySock = null;
DataOutputStream proxyOut = null;
Status opStatus = SUCCESS;
+ String errMsg = null;
BlockReceiver blockReceiver = null;
DataInputStream proxyReply = null;
@@ -702,11 +710,16 @@ class DataXceiver extends Receiver implements Runnable {
throw new IOException("Copy block " + block + " from "
+ proxySock.getRemoteSocketAddress() + " failed");
}
+
+ // get checksum info about the block we're copying
+ ReadOpChecksumInfoProto checksumInfo = copyResponse.getReadOpChecksumInfo();
+ DataChecksum remoteChecksum = DataTransferProtoUtil.fromProto(
+ checksumInfo.getChecksum());
// open a block receiver and check if the block does not exist
blockReceiver = new BlockReceiver(
block, proxyReply, proxySock.getRemoteSocketAddress().toString(),
proxySock.getLocalSocketAddress().toString(),
- null, 0, 0, 0, "", null, datanode);
+ null, 0, 0, 0, "", null, datanode, remoteChecksum);
// receive a block
blockReceiver.receiveBlock(null, null, null, null,
@@ -720,7 +733,8 @@ class DataXceiver extends Receiver implements Runnable {
} catch (IOException ioe) {
opStatus = ERROR;
- LOG.info("opReplaceBlock " + block + " received exception " + ioe);
+ errMsg = "opReplaceBlock " + block + " received exception " + ioe;
+ LOG.info(errMsg);
throw ioe;
} finally {
// receive the last byte that indicates the proxy released its thread resource
@@ -736,7 +750,7 @@ class DataXceiver extends Receiver implements Runnable {
// send response back
try {
- sendResponse(s, opStatus, datanode.socketWriteTimeout);
+ sendResponse(s, opStatus, errMsg, datanode.socketWriteTimeout);
} catch (IOException ioe) {
LOG.warn("Error writing reply back to " + s.getRemoteSocketAddress());
}
@@ -759,20 +773,41 @@ class DataXceiver extends Receiver implements Runnable {
* @param opStatus status message to write
* @param timeout send timeout
**/
- private void sendResponse(Socket s, Status status,
+ private static void sendResponse(Socket s, Status status, String message,
long timeout) throws IOException {
- DataOutputStream reply =
- new DataOutputStream(NetUtils.getOutputStream(s, timeout));
+ DataOutputStream reply = getStreamWithTimeout(s, timeout);
- writeResponse(status, reply);
+ writeResponse(status, message, reply);
}
- private void writeResponse(Status status, OutputStream out)
+ private static DataOutputStream getStreamWithTimeout(Socket s, long timeout)
+ throws IOException {
+ return new DataOutputStream(NetUtils.getOutputStream(s, timeout));
+ }
+
+ private static void writeResponse(Status status, String message, OutputStream out)
throws IOException {
- BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
- .setStatus(status)
+ BlockOpResponseProto.Builder response = BlockOpResponseProto.newBuilder()
+ .setStatus(status);
+ if (message != null) {
+ response.setMessage(message);
+ }
+ response.build().writeDelimitedTo(out);
+ out.flush();
+ }
+
+ private void writeSuccessWithChecksumInfo(BlockSender blockSender,
+ DataOutputStream out) throws IOException {
+
+ ReadOpChecksumInfoProto ckInfo = ReadOpChecksumInfoProto.newBuilder()
+ .setChecksum(DataTransferProtoUtil.toProto(blockSender.getChecksum()))
+ .setChunkOffset(blockSender.getOffset())
+ .build();
+
+ BlockOpResponseProto response = BlockOpResponseProto.newBuilder()
+ .setStatus(SUCCESS)
+ .setReadOpChecksumInfo(ckInfo)
.build();
-
response.writeDelimitedTo(out);
out.flush();
}
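
On the wire, the status, the optional error message, and the checksum info now all travel inside the delimited BlockOpResponseProto instead of the old writeChecksumHeader() side channel. A sketch, not part of the patch, of how a peer might consume that response; the accessors are assumed to follow from the proto fields set above (message, readOpChecksumInfo), and parseDelimitedFrom is the standard protobuf-java delimited reader.

    import java.io.IOException;
    import java.io.InputStream;

    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
    import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;

    // Illustrative only: reading the response written by writeResponse() /
    // writeSuccessWithChecksumInfo() above.
    public class ReadBlockOpResponseSketch {
        static void readResponse(InputStream in) throws IOException {
            BlockOpResponseProto response = BlockOpResponseProto.parseDelimitedFrom(in);
            if (response == null) {
                throw new IOException("Connection closed before a response was read");
            }
            if (response.getStatus() != Status.SUCCESS) {
                // The error text now travels with the status instead of only
                // being logged on the remote side.
                throw new IOException("Remote error: "
                    + (response.hasMessage() ? response.getMessage() : "(no message)"));
            }
            if (response.hasReadOpChecksumInfo()) {
                System.out.println("checksum=" + response.getReadOpChecksumInfo().getChecksum()
                    + " chunkOffset=" + response.getReadOpChecksumInfo().getChunkOffset());
            }
        }
    }
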
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
index f192747db59..c0d782a5c7a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiverServer.java
@@ -30,7 +30,6 @@ import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.HdfsConstants;
import org.apache.hadoop.hdfs.server.balancer.Balancer;
import org.apache.hadoop.hdfs.util.DataTransferThrottler;
import org.apache.hadoop.io.IOUtils;
@@ -132,17 +131,12 @@ class DataXceiverServer implements Runnable {
@Override
public void run() {
while (datanode.shouldRun) {
+ Socket s = null;
try {
- Socket s = ss.accept();
+ s = ss.accept();
s.setTcpNoDelay(true);
- final DataXceiver exciver;
- try {
- exciver = new DataXceiver(s, datanode, this);
- } catch(IOException e) {
- IOUtils.closeSocket(s);
- throw e;
- }
- new Daemon(datanode.threadGroup, exciver).start();
+ new Daemon(datanode.threadGroup, new DataXceiver(s, datanode, this))
+ .start();
} catch (SocketTimeoutException ignored) {
// wake up to see if should continue to run
} catch (AsynchronousCloseException ace) {
@@ -152,7 +146,19 @@ class DataXceiverServer implements Runnable {
LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ace);
}
} catch (IOException ie) {
+ IOUtils.closeSocket(s);
LOG.warn(datanode.getMachineName() + ":DataXceiverServer: ", ie);
+ } catch (OutOfMemoryError ie) {
+ IOUtils.closeSocket(s);
+ // The DataNode can run out of memory if there are too many transfers.
+ // Log the event and sleep for 30 seconds; other transfers may complete
+ // by then.
+ LOG.warn("DataNode is out of memory. Will retry in 30 seconds.", ie);
+ try {
+ Thread.sleep(30 * 1000);
+ } catch (InterruptedException e) {
+ // ignore
+ }
} catch (Throwable te) {
LOG.error(datanode.getMachineName()
+ ":DataXceiverServer: Exiting due to: ", te);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
index 5ecdca7b793..512d0b64bb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
@@ -59,7 +59,6 @@ import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
-import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.metrics2.util.MBeans;
@@ -1263,8 +1262,8 @@ public class FSDataset implements FSDatasetInterface {
throws IOException {
File f = validateBlockFile(bpid, b);
if(f == null) {
- if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
- InterDatanodeProtocol.LOG.debug("b=" + b + ", volumeMap=" + volumeMap);
+ if (DataNode.LOG.isDebugEnabled()) {
+ DataNode.LOG.debug("b=" + b + ", volumeMap=" + volumeMap);
}
throw new IOException("Block " + b + " is not valid.");
}
@@ -2003,8 +2002,8 @@ public class FSDataset implements FSDatasetInterface {
datanode.checkDiskError();
}
- if (InterDatanodeProtocol.LOG.isDebugEnabled()) {
- InterDatanodeProtocol.LOG.debug("b=" + b + ", f=" + f);
+ if (DataNode.LOG.isDebugEnabled()) {
+ DataNode.LOG.debug("b=" + b + ", f=" + f);
}
return null;
}
@@ -2088,10 +2087,9 @@ public class FSDataset implements FSDatasetInterface {
volumeMap.remove(bpid, invalidBlks[i]);
}
File metaFile = getMetaFile(f, invalidBlks[i].getGenerationStamp());
- long dfsBytes = f.length() + metaFile.length();
// Delete the block asynchronously to make sure we can do it fast enough
- asyncDiskService.deleteAsync(v, f, metaFile, dfsBytes,
+ asyncDiskService.deleteAsync(v, f, metaFile,
new ExtendedBlock(bpid, invalidBlks[i]));
}
if (error) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
index 4b49b05eeae..408a6afc472 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetAsyncDiskService.java
@@ -152,11 +152,11 @@ class FSDatasetAsyncDiskService {
* dfsUsed statistics accordingly.
*/
void deleteAsync(FSDataset.FSVolume volume, File blockFile, File metaFile,
- long dfsBytes, ExtendedBlock block) {
+ ExtendedBlock block) {
DataNode.LOG.info("Scheduling block " + block.getLocalBlock().toString()
+ " file " + blockFile + " for deletion");
ReplicaFileDeleteTask deletionTask = new ReplicaFileDeleteTask(dataset,
- volume, blockFile, metaFile, dfsBytes, block);
+ volume, blockFile, metaFile, block);
execute(volume.getCurrentDir(), deletionTask);
}
@@ -168,16 +168,14 @@ class FSDatasetAsyncDiskService {
final FSDataset.FSVolume volume;
final File blockFile;
final File metaFile;
- final long dfsBytes;
final ExtendedBlock block;
ReplicaFileDeleteTask(FSDataset dataset, FSDataset.FSVolume volume, File blockFile,
- File metaFile, long dfsBytes, ExtendedBlock block) {
+ File metaFile, ExtendedBlock block) {
this.dataset = dataset;
this.volume = volume;
this.blockFile = blockFile;
this.metaFile = metaFile;
- this.dfsBytes = dfsBytes;
this.block = block;
}
@@ -195,6 +193,7 @@ class FSDatasetAsyncDiskService {
@Override
public void run() {
+ long dfsBytes = blockFile.length() + metaFile.length();
if ( !blockFile.delete() || ( !metaFile.delete() && metaFile.exists() ) ) {
DataNode.LOG.warn("Unexpected error trying to delete block "
+ block.getBlockPoolId() + " " + block.getLocalBlock().toString()
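
Dropping the dfsBytes parameter moves the two length() calls onto the async-deletion thread, so the caller that schedules the deletion no longer pays for that file metadata I/O. A minimal sketch of the same idea, not part of the patch, using a plain ExecutorService:

    import java.io.File;
    import java.util.concurrent.ExecutorService;
    import java.util.concurrent.Executors;

    // Illustrative only: size the files inside the async task, not on the
    // scheduling thread, mirroring the deleteAsync() change above.
    public class AsyncDeleteSketch {
        private final ExecutorService executor = Executors.newSingleThreadExecutor();

        void deleteAsync(final File blockFile, final File metaFile) {
            executor.execute(new Runnable() {
                public void run() {
                    // length() now happens here, off the scheduling thread
                    long bytes = blockFile.length() + metaFile.length();
                    boolean ok = blockFile.delete()
                        && (metaFile.delete() || !metaFile.exists());
                    System.out.println((ok ? "deleted " : "failed to delete ")
                        + blockFile + " (" + bytes + " bytes)");
                }
            });
        }

        void shutdown() {
            executor.shutdown();
        }
    }
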
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
index dd53da32793..e8c00ca005e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/resources/DatanodeWebHdfsMethods.java
@@ -27,6 +27,7 @@ import java.security.PrivilegedExceptionAction;
import java.util.EnumSet;
import javax.servlet.ServletContext;
+import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.Consumes;
import javax.ws.rs.DefaultValue;
import javax.ws.rs.GET;
@@ -47,6 +48,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSClient;
import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -77,7 +79,35 @@ import com.sun.jersey.spi.container.ResourceFilters;
public class DatanodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(DatanodeWebHdfsMethods.class);
+ private static final UriFsPathParam ROOT = new UriFsPathParam("");
+
private @Context ServletContext context;
+ private @Context HttpServletResponse response;
+
+ /** Handle HTTP PUT request for the root. */
+ @PUT
+ @Path("/")
+ @Consumes({"*/*"})
+ @Produces({MediaType.APPLICATION_JSON})
+ public Response putRoot(
+ final InputStream in,
+ @Context final UserGroupInformation ugi,
+ @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
+ final PutOpParam op,
+ @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
+ final PermissionParam permission,
+ @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT)
+ final OverwriteParam overwrite,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize,
+ @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
+ final ReplicationParam replication,
+ @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
+ final BlockSizeParam blockSize
+ ) throws IOException, InterruptedException {
+ return put(in, ugi, ROOT, op, permission, overwrite, bufferSize,
+ replication, blockSize);
+ }
/** Handle HTTP PUT request. */
@PUT
@@ -100,7 +130,7 @@ public class DatanodeWebHdfsMethods {
final ReplicationParam replication,
@QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
final BlockSizeParam blockSize
- ) throws IOException, URISyntaxException, InterruptedException {
+ ) throws IOException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
@@ -108,6 +138,9 @@ public class DatanodeWebHdfsMethods {
replication, blockSize));
}
+ //clear content type
+ response.setContentType(null);
+
return ugi.doAs(new PrivilegedExceptionAction() {
@Override
public Response run() throws IOException, URISyntaxException {
@@ -120,17 +153,25 @@ public class DatanodeWebHdfsMethods {
{
final Configuration conf = new Configuration(datanode.getConf());
final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
- final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ conf.set(FsPermission.UMASK_LABEL, "000");
+
final int b = bufferSize.getValue(conf);
- final FSDataOutputStream out = new FSDataOutputStream(dfsclient.create(
- fullpath, permission.getFsPermission(),
- overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
- : EnumSet.of(CreateFlag.CREATE),
- replication.getValue(), blockSize.getValue(conf), null, b), null);
+ DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ FSDataOutputStream out = null;
try {
+ out = new FSDataOutputStream(dfsclient.create(
+ fullpath, permission.getFsPermission(),
+ overwrite.getValue() ? EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE)
+ : EnumSet.of(CreateFlag.CREATE),
+ replication.getValue(conf), blockSize.getValue(conf), null, b), null);
IOUtils.copyBytes(in, out, b);
- } finally {
out.close();
+ out = null;
+ dfsclient.close();
+ dfsclient = null;
+ } finally {
+ IOUtils.cleanup(LOG, out);
+ IOUtils.cleanup(LOG, dfsclient);
}
final InetSocketAddress nnHttpAddr = NameNode.getHttpAddress(conf);
final URI uri = new URI(WebHdfsFileSystem.SCHEME, null,
@@ -144,6 +185,22 @@ public class DatanodeWebHdfsMethods {
});
}
+ /** Handle HTTP POST request for the root. */
+ @POST
+ @Path("/")
+ @Consumes({"*/*"})
+ @Produces({MediaType.APPLICATION_JSON})
+ public Response postRoot(
+ final InputStream in,
+ @Context final UserGroupInformation ugi,
+ @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
+ final PostOpParam op,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize
+ ) throws IOException, InterruptedException {
+ return post(in, ugi, ROOT, op, bufferSize);
+ }
+
/** Handle HTTP POST request. */
@POST
@Path("{" + UriFsPathParam.NAME + ":.*}")
@@ -157,13 +214,16 @@ public class DatanodeWebHdfsMethods {
final PostOpParam op,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
- ) throws IOException, URISyntaxException, InterruptedException {
+ ) throws IOException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
+ Param.toSortedString(", ", bufferSize));
}
+ //clear content type
+ response.setContentType(null);
+
return ugi.doAs(new PrivilegedExceptionAction() {
@Override
public Response run() throws IOException {
@@ -176,13 +236,19 @@ public class DatanodeWebHdfsMethods {
{
final Configuration conf = new Configuration(datanode.getConf());
final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
- final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
final int b = bufferSize.getValue(conf);
- final FSDataOutputStream out = dfsclient.append(fullpath, b, null, null);
+ DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ FSDataOutputStream out = null;
try {
+ out = dfsclient.append(fullpath, b, null, null);
IOUtils.copyBytes(in, out, b);
- } finally {
out.close();
+ out = null;
+ dfsclient.close();
+ dfsclient = null;
+ } finally {
+ IOUtils.cleanup(LOG, out);
+ IOUtils.cleanup(LOG, dfsclient);
}
return Response.ok().type(MediaType.APPLICATION_JSON).build();
}
@@ -193,6 +259,24 @@ public class DatanodeWebHdfsMethods {
});
}
+ /** Handle HTTP GET request for the root. */
+ @GET
+ @Path("/")
+ @Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
+ public Response getRoot(
+ @Context final UserGroupInformation ugi,
+ @QueryParam(GetOpParam.NAME) @DefaultValue(GetOpParam.DEFAULT)
+ final GetOpParam op,
+ @QueryParam(OffsetParam.NAME) @DefaultValue(OffsetParam.DEFAULT)
+ final OffsetParam offset,
+ @QueryParam(LengthParam.NAME) @DefaultValue(LengthParam.DEFAULT)
+ final LengthParam length,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize
+ ) throws IOException, InterruptedException {
+ return get(ugi, ROOT, op, offset, length, bufferSize);
+ }
+
/** Handle HTTP GET request. */
@GET
@Path("{" + UriFsPathParam.NAME + ":.*}")
@@ -208,13 +292,16 @@ public class DatanodeWebHdfsMethods {
final LengthParam length,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
- ) throws IOException, URISyntaxException, InterruptedException {
+ ) throws IOException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
+ Param.toSortedString(", ", offset, length, bufferSize));
}
+ //clear content type
+ response.setContentType(null);
+
return ugi.doAs(new PrivilegedExceptionAction() {
@Override
public Response run() throws IOException {
@@ -223,32 +310,62 @@ public class DatanodeWebHdfsMethods {
final DataNode datanode = (DataNode)context.getAttribute("datanode");
final Configuration conf = new Configuration(datanode.getConf());
final InetSocketAddress nnRpcAddr = NameNode.getAddress(conf);
- final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
switch(op.getValue()) {
case OPEN:
{
final int b = bufferSize.getValue(conf);
- final DFSDataInputStream in = new DFSClient.DFSDataInputStream(
- dfsclient.open(fullpath, b, true));
- in.seek(offset.getValue());
-
+ final DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ DFSDataInputStream in = null;
+ try {
+ in = new DFSClient.DFSDataInputStream(
+ dfsclient.open(fullpath, b, true));
+ in.seek(offset.getValue());
+ } catch(IOException ioe) {
+ IOUtils.cleanup(LOG, in);
+ IOUtils.cleanup(LOG, dfsclient);
+ throw ioe;
+ }
+ final DFSDataInputStream dis = in;
final StreamingOutput streaming = new StreamingOutput() {
@Override
public void write(final OutputStream out) throws IOException {
final Long n = length.getValue();
- if (n == null) {
- IOUtils.copyBytes(in, out, b);
- } else {
- IOUtils.copyBytes(in, out, n, false);
+ DFSDataInputStream dfsin = dis;
+ DFSClient client = dfsclient;
+ try {
+ if (n == null) {
+ IOUtils.copyBytes(dfsin, out, b);
+ } else {
+ IOUtils.copyBytes(dfsin, out, n, false);
+ }
+ dfsin.close();
+ dfsin = null;
+ dfsclient.close();
+ client = null;
+ } finally {
+ IOUtils.cleanup(LOG, dfsin);
+ IOUtils.cleanup(LOG, client);
}
}
};
- return Response.ok(streaming).type(MediaType.APPLICATION_OCTET_STREAM).build();
+
+ final int status = offset.getValue() == 0?
+ HttpServletResponse.SC_OK: HttpServletResponse.SC_PARTIAL_CONTENT;
+ return Response.status(status).entity(streaming).type(
+ MediaType.APPLICATION_OCTET_STREAM).build();
}
case GETFILECHECKSUM:
{
- final MD5MD5CRC32FileChecksum checksum = dfsclient.getFileChecksum(fullpath);
+ MD5MD5CRC32FileChecksum checksum = null;
+ DFSClient dfsclient = new DFSClient(nnRpcAddr, conf);
+ try {
+ checksum = dfsclient.getFileChecksum(fullpath);
+ dfsclient.close();
+ dfsclient = null;
+ } finally {
+ IOUtils.cleanup(LOG, dfsclient);
+ }
final String js = JsonUtil.toJsonString(checksum);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
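
The handlers above all follow the same cleanup idiom: close the stream and the DFSClient explicitly on the success path, null the references, and let a best-effort cleanup in the finally block handle only the failure path. A small standalone sketch of that idiom, not part of the patch, with a local helper standing in for IOUtils.cleanup:

    import java.io.Closeable;
    import java.io.FileInputStream;
    import java.io.IOException;

    // Illustrative only: close-then-null on success, best-effort cleanup in finally.
    public class CloseThenNullSketch {
        // Stand-in for org.apache.hadoop.io.IOUtils.cleanup(log, closeables).
        static void cleanup(Closeable... closeables) {
            for (Closeable c : closeables) {
                if (c != null) {
                    try { c.close(); } catch (IOException ignored) {}
                }
            }
        }

        static int readFirstByte(String path) throws IOException {
            FileInputStream in = new FileInputStream(path);
            try {
                int b = in.read();
                in.close();   // close on the success path so close() errors propagate
                in = null;    // mark as closed; the finally block becomes a no-op
                return b;
            } finally {
                cleanup(in);  // only does real work if the success path failed
            }
        }

        public static void main(String[] args) throws IOException {
            System.out.println(readFirstByte(args.length > 0 ? args[0] : "/etc/hosts"));
        }
    }
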
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
index 69766203411..2bd585e236b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java
@@ -77,6 +77,9 @@ class BackupJournalManager implements JournalManager {
public void recoverUnfinalizedSegments() throws IOException {
}
+ @Override
+ public void close() throws IOException {}
+
public boolean matchesRegistration(NamenodeRegistration bnReg) {
return bnReg.getAddress().equals(this.bnReg.getAddress());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
index 1e8be5b7075..c26f84a1ed5 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
@@ -28,10 +28,13 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolServerSideTranslatorR23;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
+import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -135,6 +138,16 @@ public class BackupNode extends NameNode {
CommonConfigurationKeys.FS_TRASH_INTERVAL_DEFAULT);
NamespaceInfo nsInfo = handshake(conf);
super.initialize(conf);
+
+ if (!namesystem.isInSafeMode()) {
+ namesystem.setSafeMode(SafeModeAction.SAFEMODE_ENTER);
+ }
+
+ // Backup node should never do lease recovery,
+ // therefore lease hard limit should never expire.
+ namesystem.leaseManager.setLeasePeriod(
+ HdfsConstants.LEASE_SOFTLIMIT_PERIOD, Long.MAX_VALUE);
+
clusterId = nsInfo.getClusterID();
blockPoolId = nsInfo.getBlockPoolID();
@@ -171,7 +184,9 @@ public class BackupNode extends NameNode {
}
}
// Stop the RPC client
- RPC.stopProxy(namenode);
+ if (namenode != null) {
+ RPC.stopProxy(namenode);
+ }
namenode = null;
// Stop the checkpoint manager
if(checkpointManager != null) {
@@ -181,14 +196,23 @@ public class BackupNode extends NameNode {
// Stop name-node threads
super.stop();
}
-
- static class BackupNodeRpcServer extends NameNodeRpcServer implements JournalProtocol {
+
+ /* @Override */// NameNode
+ public boolean setSafeMode(SafeModeAction action) throws IOException {
+ throw new UnsupportedActionException("setSafeMode");
+ }
+
+ static class BackupNodeRpcServer extends NameNodeRpcServer implements
+ JournalProtocol {
private final String nnRpcAddress;
private BackupNodeRpcServer(Configuration conf, BackupNode nn)
throws IOException {
super(conf, nn);
- this.server.addProtocol(JournalProtocol.class, this);
+ JournalProtocolServerSideTranslatorR23 journalProtocolTranslator =
+ new JournalProtocolServerSideTranslatorR23(this);
+ this.clientRpcServer.addProtocol(JournalWireProtocol.class,
+ journalProtocolTranslator);
nnRpcAddress = nn.nnRpcAddress;
}
@@ -197,9 +221,8 @@ public class BackupNode extends NameNode {
throws IOException {
if (protocol.equals(JournalProtocol.class.getName())) {
return JournalProtocol.versionID;
- } else {
- return super.getProtocolVersion(protocol, clientVersion);
}
+ return super.getProtocolVersion(protocol, clientVersion);
}
/////////////////////////////////////////////////////
@@ -250,7 +273,7 @@ public class BackupNode extends NameNode {
// connect to name node
InetSocketAddress nnAddress = NameNode.getServiceAddress(conf, true);
this.namenode =
- (NamenodeProtocol) RPC.waitForProxy(NamenodeProtocol.class,
+ RPC.waitForProxy(NamenodeProtocol.class,
NamenodeProtocol.versionID, nnAddress, conf);
this.nnRpcAddress = getHostPortString(nnAddress);
this.nnHttpAddress = getHostPortString(super.getHttpServerAddress(conf));
@@ -264,7 +287,9 @@ public class BackupNode extends NameNode {
LOG.info("Problem connecting to server: " + nnAddress);
try {
Thread.sleep(1000);
- } catch (InterruptedException ie) {}
+ } catch (InterruptedException ie) {
+ LOG.warn("Encountered exception ", e);
+ }
}
}
return nsInfo;
@@ -313,7 +338,9 @@ public class BackupNode extends NameNode {
LOG.info("Problem connecting to name-node: " + nnRpcAddress);
try {
Thread.sleep(1000);
- } catch (InterruptedException ie) {}
+ } catch (InterruptedException ie) {
+ LOG.warn("Encountered exception ", e);
+ }
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
index 0db5cb11138..5f5ebaf7481 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
@@ -37,9 +37,7 @@ public class CheckpointSignature extends StorageInfo
implements WritableComparable {
private static final String FIELD_SEPARATOR = ":";
private static final int NUM_FIELDS = 7;
-
String blockpoolID = "";
-
long mostRecentCheckpointTxId;
long curSegmentTxId;
@@ -67,6 +65,14 @@ public class CheckpointSignature extends StorageInfo
blockpoolID = fields[i++];
}
+ public CheckpointSignature(StorageInfo info, String blockpoolID,
+ long mostRecentCheckpointTxId, long curSegmentTxId) {
+ super(info);
+ this.blockpoolID = blockpoolID;
+ this.mostRecentCheckpointTxId = mostRecentCheckpointTxId;
+ this.curSegmentTxId = curSegmentTxId;
+ }
+
/**
* Get the cluster id from CheckpointSignature
* @return the cluster id
@@ -83,6 +89,14 @@ public class CheckpointSignature extends StorageInfo
return blockpoolID;
}
+ public long getMostRecentCheckpointTxId() {
+ return mostRecentCheckpointTxId;
+ }
+
+ public long getCurSegmentTxId() {
+ return curSegmentTxId;
+ }
+
/**
* Set the block pool id of CheckpointSignature.
*
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
index ea3863ca1e4..84408c01622 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
@@ -241,8 +241,12 @@ class Checkpointer extends Daemon {
rollForwardByApplyingLogs(manifest, bnImage, backupNode.getNamesystem());
}
-
+
long txid = bnImage.getLastAppliedTxId();
+
+ backupNode.namesystem.dir.setReady();
+ backupNode.namesystem.setBlockTotal();
+
bnImage.saveFSImageInAllDirs(backupNode.getNamesystem(), txid);
bnStorage.writeAll();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
index 836c22d0148..067990d01b9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogBackupOutputStream.java
@@ -22,6 +22,7 @@ import java.net.InetSocketAddress;
import java.util.Arrays;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalProtocolTranslatorR23;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.protocol.JournalProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
@@ -56,8 +57,7 @@ class EditLogBackupOutputStream extends EditLogOutputStream {
NetUtils.createSocketAddr(bnRegistration.getAddress());
try {
this.backupNode =
- RPC.getProxy(JournalProtocol.class,
- JournalProtocol.versionID, bnAddress, new HdfsConfiguration());
+ new JournalProtocolTranslatorR23(bnAddress, new HdfsConfiguration());
} catch(IOException e) {
Storage.LOG.error("Error connecting to: " + bnAddress, e);
throw e;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
index 654c3a231d4..23b2e220b8f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.Options.Rename;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.UnresolvedLinkException;
+import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -158,6 +159,11 @@ public class FSDirectory implements Closeable {
*/
void imageLoadComplete() {
Preconditions.checkState(!ready, "FSDirectory already loaded");
+ setReady();
+ }
+
+ void setReady() {
+ if(ready) return;
writeLock();
try {
setReady(true);
@@ -233,7 +239,7 @@ public class FSDirectory implements Closeable {
clientMachine, clientNode);
writeLock();
try {
- newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE, false);
+ newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE);
} finally {
writeUnlock();
}
@@ -276,7 +282,7 @@ public class FSDirectory implements Closeable {
writeLock();
try {
try {
- newNode = addNode(path, newNode, diskspace, false);
+ newNode = addNode(path, newNode, diskspace);
if(newNode != null && blocks != null) {
int nrBlocks = blocks.length;
// Add file->block mapping
@@ -303,7 +309,7 @@ public class FSDirectory implements Closeable {
try {
try {
newParent = rootDir.addToParent(src, newNode, parentINode,
- false, propagateModTime);
+ propagateModTime);
cacheName(newNode);
} catch (FileNotFoundException e) {
return null;
@@ -576,7 +582,7 @@ public class FSDirectory implements Closeable {
// add src to the destination
dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
- srcChild, UNKNOWN_DISK_SPACE, false);
+ srcChild, UNKNOWN_DISK_SPACE);
if (dstChild != null) {
srcChild = null;
if (NameNode.stateChangeLog.isDebugEnabled()) {
@@ -593,7 +599,7 @@ public class FSDirectory implements Closeable {
// put it back
srcChild.setLocalName(srcChildName);
addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, srcChild,
- UNKNOWN_DISK_SPACE, false);
+ UNKNOWN_DISK_SPACE);
}
}
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
@@ -731,7 +737,7 @@ public class FSDirectory implements Closeable {
removedSrc.setLocalName(dstComponents[dstInodes.length - 1]);
// add src as dst to complete rename
dstChild = addChildNoQuotaCheck(dstInodes, dstInodes.length - 1,
- removedSrc, UNKNOWN_DISK_SPACE, false);
+ removedSrc, UNKNOWN_DISK_SPACE);
int filesDeleted = 0;
if (dstChild != null) {
@@ -759,13 +765,13 @@ public class FSDirectory implements Closeable {
// Rename failed - restore src
removedSrc.setLocalName(srcChildName);
addChildNoQuotaCheck(srcInodes, srcInodes.length - 1, removedSrc,
- UNKNOWN_DISK_SPACE, false);
+ UNKNOWN_DISK_SPACE);
}
if (removedDst != null) {
// Rename failed - restore dst
removedDst.setLocalName(dstChildName);
addChildNoQuotaCheck(dstInodes, dstInodes.length - 1, removedDst,
- UNKNOWN_DISK_SPACE, false);
+ UNKNOWN_DISK_SPACE);
}
}
NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
@@ -1224,13 +1230,21 @@ public class FSDirectory implements Closeable {
* Get {@link INode} associated with the file.
*/
INodeFile getFileINode(String src) throws UnresolvedLinkException {
+ INode inode = getINode(src);
+ if (inode == null || inode.isDirectory())
+ return null;
+ assert !inode.isLink();
+ return (INodeFile) inode;
+ }
+
+ /**
+ * Get {@link INode} associated with the file / directory.
+ */
+ INode getINode(String src) throws UnresolvedLinkException {
readLock();
try {
- INode inode = rootDir.getNode(src, true);
- if (inode == null || inode.isDirectory())
- return null;
- assert !inode.isLink();
- return (INodeFile)inode;
+ INode iNode = rootDir.getNode(src, true);
+ return iNode;
} finally {
readUnlock();
}
@@ -1436,9 +1450,10 @@ public class FSDirectory implements Closeable {
* @param src string representation of the path to the directory
* @param permissions the permission of the directory
- * @param inheritPermission if the permission of the directory should inherit
- * from its parent or not. The automatically created
- * ones always inherit its permission from its parent
+ * @param inheritPermission if the permission of the directory should inherit
+ * from its parent or not. u+wx is implicitly added to
+ * the automatically created directories, and to the
+ * given directory if inheritPermission is true
* @param now creation time
* @return true if the operation succeeds false otherwise
* @throws FileNotFoundException if an ancestor or itself is a file
@@ -1454,6 +1469,7 @@ public class FSDirectory implements Closeable {
String[] names = INode.getPathNames(src);
byte[][] components = INode.getPathComponents(names);
INode[] inodes = new INode[components.length];
+ final int lastInodeIndex = inodes.length - 1;
writeLock();
try {
@@ -1470,12 +1486,44 @@ public class FSDirectory implements Closeable {
}
}
+ // default to creating parent dirs with the given perms
+ PermissionStatus parentPermissions = permissions;
+
+ // if not inheriting and it's the last inode, there's no use in
+ // computing perms that won't be used
+ if (inheritPermission || (i < lastInodeIndex)) {
+ // if inheriting (ie. creating a file or symlink), use the parent dir,
+ // else the supplied permissions
+ // NOTE: the permissions of the auto-created directories violate posix
+ FsPermission parentFsPerm = inheritPermission
+ ? inodes[i-1].getFsPermission() : permissions.getPermission();
+
+ // ensure that the permissions allow user write+execute
+ if (!parentFsPerm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
+ parentFsPerm = new FsPermission(
+ parentFsPerm.getUserAction().or(FsAction.WRITE_EXECUTE),
+ parentFsPerm.getGroupAction(),
+ parentFsPerm.getOtherAction()
+ );
+ }
+
+ if (!parentPermissions.getPermission().equals(parentFsPerm)) {
+ parentPermissions = new PermissionStatus(
+ parentPermissions.getUserName(),
+ parentPermissions.getGroupName(),
+ parentFsPerm
+ );
+ // when inheriting, use same perms for entire path
+ if (inheritPermission) permissions = parentPermissions;
+ }
+ }
+
// create directories beginning from the first null index
for(; i < inodes.length; i++) {
pathbuilder.append(Path.SEPARATOR + names[i]);
String cur = pathbuilder.toString();
- unprotectedMkdir(inodes, i, components[i], permissions,
- inheritPermission || i != components.length-1, now);
+ unprotectedMkdir(inodes, i, components[i],
+ (i < lastInodeIndex) ? parentPermissions : permissions, now);
if (inodes[i] == null) {
return false;
}
@@ -1506,7 +1554,7 @@ public class FSDirectory implements Closeable {
rootDir.getExistingPathINodes(components, inodes, false);
unprotectedMkdir(inodes, inodes.length-1, components[inodes.length-1],
- permissions, false, timestamp);
+ permissions, timestamp);
return inodes[inodes.length-1];
}
@@ -1515,19 +1563,19 @@ public class FSDirectory implements Closeable {
* All ancestors exist. Newly created one stored at index pos.
*/
private void unprotectedMkdir(INode[] inodes, int pos,
- byte[] name, PermissionStatus permission, boolean inheritPermission,
+ byte[] name, PermissionStatus permission,
long timestamp) throws QuotaExceededException {
assert hasWriteLock();
inodes[pos] = addChild(inodes, pos,
new INodeDirectory(name, permission, timestamp),
- -1, inheritPermission );
+ -1);
}
/** Add a node child to the namespace. The full path name of the node is src.
* childDiskspace should be -1, if unknown.
* QuotaExceededException is thrown if it violates quota limit */
private <T extends INode> T addNode(String src, T child,
- long childDiskspace, boolean inheritPermission)
+ long childDiskspace)
throws QuotaExceededException, UnresolvedLinkException {
byte[][] components = INode.getPathComponents(src);
byte[] path = components[components.length-1];
@@ -1537,8 +1585,7 @@ public class FSDirectory implements Closeable {
writeLock();
try {
rootDir.getExistingPathINodes(components, inodes, false);
- return addChild(inodes, inodes.length-1, child, childDiskspace,
- inheritPermission);
+ return addChild(inodes, inodes.length-1, child, childDiskspace);
} finally {
writeUnlock();
}
@@ -1666,7 +1713,7 @@ public class FSDirectory implements Closeable {
* Its ancestors are stored at [0, pos-1].
* QuotaExceededException is thrown if it violates quota limit */
private <T extends INode> T addChild(INode[] pathComponents, int pos,
- T child, long childDiskspace, boolean inheritPermission,
+ T child, long childDiskspace,
boolean checkQuota) throws QuotaExceededException {
// The filesystem limits are not really quotas, so this check may appear
// odd. It's because a rename operation deletes the src, tries to add
@@ -1689,7 +1736,7 @@ public class FSDirectory implements Closeable {
throw new NullPointerException("Panic: parent does not exist");
}
T addedNode = ((INodeDirectory)pathComponents[pos-1]).addChild(
- child, inheritPermission, true);
+ child, true);
if (addedNode == null) {
updateCount(pathComponents, pos, -counts.getNsCount(),
-childDiskspace, true);
@@ -1698,18 +1745,16 @@ public class FSDirectory implements Closeable {
}
private <T extends INode> T addChild(INode[] pathComponents, int pos,
- T child, long childDiskspace, boolean inheritPermission)
+ T child, long childDiskspace)
throws QuotaExceededException {
- return addChild(pathComponents, pos, child, childDiskspace,
- inheritPermission, true);
+ return addChild(pathComponents, pos, child, childDiskspace, true);
}
private <T extends INode> T addChildNoQuotaCheck(INode[] pathComponents,
- int pos, T child, long childDiskspace, boolean inheritPermission) {
+ int pos, T child, long childDiskspace) {
T inode = null;
try {
- inode = addChild(pathComponents, pos, child, childDiskspace,
- inheritPermission, false);
+ inode = addChild(pathComponents, pos, child, childDiskspace, false);
} catch (QuotaExceededException e) {
NameNode.LOG.warn("FSDirectory.addChildNoQuotaCheck - unexpected", e);
}
@@ -1934,9 +1979,9 @@ public class FSDirectory implements Closeable {
}
/**
- * Sets the access time on the file. Logs it in the transaction log.
+ * Sets the access time on the file/directory. Logs it in the transaction log.
*/
- void setTimes(String src, INodeFile inode, long mtime, long atime, boolean force) {
+ void setTimes(String src, INode inode, long mtime, long atime, boolean force) {
boolean status = false;
writeLock();
try {
@@ -1952,11 +1997,11 @@ public class FSDirectory implements Closeable {
boolean unprotectedSetTimes(String src, long mtime, long atime, boolean force)
throws UnresolvedLinkException {
assert hasWriteLock();
- INodeFile inode = getFileINode(src);
+ INode inode = getINode(src);
return unprotectedSetTimes(src, inode, mtime, atime, force);
}
- private boolean unprotectedSetTimes(String src, INodeFile inode, long mtime,
+ private boolean unprotectedSetTimes(String src, INode inode, long mtime,
long atime, boolean force) {
assert hasWriteLock();
boolean status = false;
@@ -2119,7 +2164,7 @@ public class FSDirectory implements Closeable {
assert hasWriteLock();
INodeSymlink newNode = new INodeSymlink(target, modTime, atime, perm);
try {
- newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE, false);
+ newNode = addNode(path, newNode, UNKNOWN_DISK_SPACE);
} catch (UnresolvedLinkException e) {
/* All UnresolvedLinkExceptions should have been resolved by now, but we
* should re-throw them in case that changes so they are not swallowed
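As a side illustration of the permission handling consolidated in the FSDirectory changes above, the following is a minimal, self-contained sketch (not part of the patch) of the u+wx widening applied to automatically created parent directories; it assumes only the public FsPermission/FsAction API, and the class name is illustrative.

import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;

public class MkdirPermissionSketch {
  // Widen a permission so the user always has write+execute, mirroring the
  // check performed for auto-created parent directories in FSDirectory.mkdirs.
  static FsPermission withUserWriteExecute(FsPermission perm) {
    if (perm.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
      return perm; // already grants u+wx, keep unchanged
    }
    return new FsPermission(
        perm.getUserAction().or(FsAction.WRITE_EXECUTE),
        perm.getGroupAction(),
        perm.getOtherAction());
  }

  public static void main(String[] args) {
    // r-x------ is widened to rwx------, rwxr-x--- is left untouched
    System.out.println(withUserWriteExecute(new FsPermission((short) 0500)));
    System.out.println(withUserWriteExecute(new FsPermission((short) 0750)));
  }
}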
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
index aac2a35592e..f80f863346f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
@@ -215,6 +215,12 @@ public class FSEditLog {
waitForSyncToFinish();
endCurrentLogSegment(true);
}
+
+ try {
+ journalSet.close();
+ } catch (IOException ioe) {
+ LOG.warn("Error closing journalSet", ioe);
+ }
state = State.CLOSED;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
index a6af3eb8e87..4cfb014dd53 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
@@ -998,18 +998,12 @@ public class FSImage implements Closeable {
/**
* End checkpoint.
*
- * Rename uploaded checkpoint to the new image;
- * purge old edits file;
- * rename edits.new to edits;
- * redirect edit log streams to the new edits;
- * update checkpoint time if the remote node is a checkpoint only node.
+ * Validate the current storage info with the given signature.
*
- * @param sig
- * @param remoteNNRole
- * @throws IOException
+ * @param sig to validate the current storage info against
+ * @throws IOException if the checkpoint fields are inconsistent
*/
- void endCheckpoint(CheckpointSignature sig,
- NamenodeRole remoteNNRole) throws IOException {
+ void endCheckpoint(CheckpointSignature sig) throws IOException {
sig.validateStorageInfo(this);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
index ff590c15280..95a343daecb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
@@ -17,6 +17,45 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BLOCK_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_ACCESSTIME_PRECISION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_KEY_UPDATE_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_MAX_LIFETIME_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_DELEGATION_TOKEN_RENEW_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_MAX_OBJECTS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPLICATION_MIN_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_REPL_QUEUE_THRESHOLD_PCT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_UPGRADE_PERMISSION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_SUPPORT_APPEND_KEY;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import java.io.BufferedWriter;
@@ -68,7 +107,6 @@ import org.apache.hadoop.fs.UnresolvedLinkException;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.fs.permission.PermissionStatus;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
@@ -119,7 +157,6 @@ import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
-import org.apache.hadoop.metrics2.lib.MutableCounterInt;
import org.apache.hadoop.metrics2.util.MBeans;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
@@ -203,8 +240,6 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private UserGroupInformation fsOwner;
private String supergroup;
private PermissionStatus defaultPermission;
- // FSNamesystemMetrics counter variables
- @Metric private MutableCounterInt expiredHeartbeats;
// Scan interval is not configurable.
private static final long DELEGATION_TOKEN_REMOVER_SCAN_INTERVAL =
@@ -312,7 +347,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_KEY,
DFS_NAMENODE_RESOURCE_CHECK_INTERVAL_DEFAULT);
this.systemStart = now();
- this.blockManager = new BlockManager(this, conf);
+ this.blockManager = new BlockManager(this, this, conf);
this.datanodeStatistics = blockManager.getDatanodeManager().getDatanodeStatistics();
this.fsLock = new ReentrantReadWriteLock(true); // fair locking
setConfigurationParameters(conf);
@@ -989,7 +1024,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
if (isPermissionEnabled) {
checkPathAccess(src, FsAction.WRITE);
}
- INodeFile inode = dir.getFileINode(src);
+ INode inode = dir.getINode(src);
if (inode != null) {
dir.setTimes(src, inode, mtime, atime, true);
if (auditLog.isInfoEnabled() && isExternalInvocation()) {
@@ -999,7 +1034,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
"setTimes", src, null, stat);
}
} else {
- throw new FileNotFoundException("File " + src + " does not exist.");
+ throw new FileNotFoundException("File/Directory " + src + " does not exist.");
}
} finally {
writeUnlock();
@@ -2675,9 +2710,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
return blockManager.getMissingBlocksCount();
}
- /** Increment expired heartbeat counter. */
- public void incrExpiredHeartbeats() {
- expiredHeartbeats.incr();
+ @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
+ public int getExpiredHeartbeats() {
+ return datanodeStatistics.getExpiredHeartbeats();
}
/** @see ClientProtocol#getStats() */
@@ -2905,6 +2940,9 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
private SafeModeInfo(Configuration conf) {
this.threshold = conf.getFloat(DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY,
DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_DEFAULT);
+ if(threshold > 1.0) {
+ LOG.warn("The threshold value should't be greater than 1, threshold: " + threshold);
+ }
this.datanodeThreshold = conf.getInt(
DFS_NAMENODE_SAFEMODE_MIN_DATANODES_KEY,
DFS_NAMENODE_SAFEMODE_MIN_DATANODES_DEFAULT);
@@ -3188,7 +3226,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
msg += String.format(
"The reported blocks %d needs additional %d"
+ " blocks to reach the threshold %.4f of total blocks %d.",
- blockSafe, (blockThreshold - blockSafe), threshold, blockTotal);
+ blockSafe, (blockThreshold - blockSafe) + 1, threshold, blockTotal);
}
if (numLive < datanodeThreshold) {
if (!"".equals(msg)) {
@@ -3197,7 +3235,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
msg += String.format(
"The number of live datanodes %d needs an additional %d live "
+ "datanodes to reach the minimum number %d.",
- numLive, datanodeThreshold - numLive, datanodeThreshold);
+ numLive, (datanodeThreshold - numLive) + 1, datanodeThreshold);
}
msg += " " + leaveMsg;
} else {
@@ -3362,7 +3400,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
/**
* Set the total number of blocks in the system.
*/
- private void setBlockTotal() {
+ void setBlockTotal() {
// safeMode is volatile, and may be set to null at any time
SafeModeInfo safeMode = this.safeMode;
if (safeMode == null)
@@ -3508,15 +3546,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
void endCheckpoint(NamenodeRegistration registration,
CheckpointSignature sig) throws IOException {
- writeLock();
+ readLock();
try {
if (isInSafeMode()) {
throw new SafeModeException("Checkpoint not ended", safeMode);
}
LOG.info("End checkpoint for " + registration.getAddress());
- getFSImage().endCheckpoint(sig, registration.getRole());
+ getFSImage().endCheckpoint(sig);
} finally {
- writeUnlock();
+ readUnlock();
}
}
@@ -4436,4 +4474,15 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
public BlockManager getBlockManager() {
return blockManager;
}
+
+ /**
+ * Verifies that the given identifier and password are valid and match.
+ * @param identifier Token identifier.
+ * @param password Password in the token.
+ * @throws InvalidToken
+ */
+ public synchronized void verifyToken(DelegationTokenIdentifier identifier,
+ byte[] password) throws InvalidToken {
+ getDelegationTokenSecretManager().verifyToken(identifier, password);
+ }
}
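For context on the metrics change above, where the MutableCounterInt field is replaced by an annotated getter backed by DatanodeStatistics, the following is a hypothetical, minimal metrics2 source showing the gauge-style pattern. The class name, registration strings, and the plain int field standing in for DatanodeStatistics are illustrative only.

import org.apache.hadoop.metrics2.annotation.Metric;
import org.apache.hadoop.metrics2.annotation.Metrics;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;

@Metrics(context="dfs")
public class ExpiredHeartbeatsSource {
  private volatile int expired; // stand-in for DatanodeStatistics

  public void noteExpiredHeartbeat() { expired++; }

  // Polled by the metrics system on every snapshot, so no separate counter
  // field has to be kept in sync inside the source class.
  @Metric({"ExpiredHeartbeats", "Number of expired heartbeats"})
  public int getExpiredHeartbeats() { return expired; }

  public static void main(String[] args) {
    DefaultMetricsSystem.initialize("Example");
    ExpiredHeartbeatsSource source = new ExpiredHeartbeatsSource();
    DefaultMetricsSystem.instance().register("ExpiredHeartbeatsSource",
        "Sample expired-heartbeat source", source);
    source.noteExpiredHeartbeat();
  }
}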
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
index 8cfc9758239..eeb40c2f572 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
@@ -70,6 +70,9 @@ class FileJournalManager implements JournalManager {
this.sd = sd;
}
+ @Override
+ public void close() throws IOException {}
+
@Override
synchronized public EditLogOutputStream startLogSegment(long txid)
throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
index c9cea600257..83d9858586e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INode.java
@@ -304,7 +304,6 @@ public abstract class INode implements Comparable, FSInodeInfo {
* Always set the last modification time of inode.
*/
void setModificationTimeForce(long modtime) {
- assert !isDirectory();
this.modificationTime = modtime;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
index 7f9a8e16261..7f0c997ee90 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
@@ -261,25 +261,13 @@ class INodeDirectory extends INode {
* Add a child inode to the directory.
*
* @param node INode to insert
- * @param inheritPermission inherit permission from parent?
* @param setModTime set modification time for the parent node
* not needed when replaying the addition and
* the parent already has the proper mod time
* @return null if the child with this name already exists;
* node, otherwise
*/
- <T extends INode> T addChild(final T node, boolean inheritPermission,
- boolean setModTime) {
- if (inheritPermission) {
- FsPermission p = getFsPermission();
- //make sure the permission has wx for the user
- if (!p.getUserAction().implies(FsAction.WRITE_EXECUTE)) {
- p = new FsPermission(p.getUserAction().or(FsAction.WRITE_EXECUTE),
- p.getGroupAction(), p.getOtherAction());
- }
- node.setPermission(p);
- }
-
+ <T extends INode> T addChild(final T node, boolean setModTime) {
if (children == null) {
children = new ArrayList<INode>(DEFAULT_FILES_PER_DIRECTORY);
}
@@ -297,31 +285,22 @@ class INodeDirectory extends INode {
return node;
}
- /**
- * Equivalent to addNode(path, newNode, false).
- * @see #addNode(String, INode, boolean)
- */
- <T extends INode> T addNode(String path, T newNode)
- throws FileNotFoundException, UnresolvedLinkException {
- return addNode(path, newNode, false);
- }
/**
* Add new INode to the file tree.
* Find the parent and insert
*
* @param path file path
* @param newNode INode to be added
- * @param inheritPermission If true, copy the parent's permission to newNode.
* @return null if the node already exists; inserted INode, otherwise
* @throws FileNotFoundException if parent does not exist or
* @throws UnresolvedLinkException if any path component is a symbolic link
* is not a directory.
*/
- <T extends INode> T addNode(String path, T newNode, boolean inheritPermission
+ <T extends INode> T addNode(String path, T newNode
) throws FileNotFoundException, UnresolvedLinkException {
byte[][] pathComponents = getPathComponents(path);
if(addToParent(pathComponents, newNode,
- inheritPermission, true) == null)
+ true) == null)
return null;
return newNode;
}
@@ -338,13 +317,12 @@ class INodeDirectory extends INode {
INodeDirectory addToParent( byte[] localname,
INode newNode,
INodeDirectory parent,
- boolean inheritPermission,
boolean propagateModTime
) throws FileNotFoundException,
UnresolvedLinkException {
// insert into the parent children list
newNode.name = localname;
- if(parent.addChild(newNode, inheritPermission, propagateModTime) == null)
+ if(parent.addChild(newNode, propagateModTime) == null)
return null;
return parent;
}
@@ -380,7 +358,6 @@ class INodeDirectory extends INode {
*/
INodeDirectory addToParent( byte[][] pathComponents,
INode newNode,
- boolean inheritPermission,
boolean propagateModTime
) throws FileNotFoundException,
UnresolvedLinkException {
@@ -391,7 +368,7 @@ class INodeDirectory extends INode {
newNode.name = pathComponents[pathLen-1];
// insert into the parent children list
INodeDirectory parent = getParent(pathComponents);
- if(parent.addChild(newNode, inheritPermission, propagateModTime) == null)
+ if(parent.addChild(newNode, propagateModTime) == null)
return null;
return parent;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
index 0bb7b0f8aaf..348e3ef9819 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
+import java.io.Closeable;
import java.io.IOException;
@@ -27,7 +28,7 @@ import java.io.IOException;
* each conceptual place of storage corresponds to exactly one instance of
* this class, which is created when the EditLog is first opened.
*/
-interface JournalManager {
+interface JournalManager extends Closeable {
/**
* Begin writing to a new segment of the log stream, which starts at
* the given transaction ID.
@@ -81,6 +82,11 @@ interface JournalManager {
*/
void recoverUnfinalizedSegments() throws IOException;
+ /**
+ * Close the journal manager, freeing any resources it may hold.
+ */
+ void close() throws IOException;
+
/**
* Indicate that a journal cannot be used to load a certain range of
* edits.
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
index 0d6bc743daf..45b5714082d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalSet.java
@@ -72,11 +72,20 @@ public class JournalSet implements JournalManager {
/**
* Closes the stream, also sets it to null.
*/
- public void close() throws IOException {
+ public void closeStream() throws IOException {
if (stream == null) return;
stream.close();
stream = null;
}
+
+ /**
+ * Close the journal and its stream.
+ */
+ public void close() throws IOException {
+ closeStream();
+
+ journal.close();
+ }
/**
* Aborts the stream, also sets it to null.
@@ -145,13 +154,23 @@ public class JournalSet implements JournalManager {
@Override
public void apply(JournalAndStream jas) throws IOException {
if (jas.isActive()) {
- jas.close();
+ jas.closeStream();
jas.getManager().finalizeLogSegment(firstTxId, lastTxId);
}
}
}, "finalize log segment " + firstTxId + ", " + lastTxId);
}
-
+
+ @Override
+ public void close() throws IOException {
+ mapJournalsAndReportErrors(new JournalClosure() {
+ @Override
+ public void apply(JournalAndStream jas) throws IOException {
+ jas.close();
+ }
+ }, "close journal");
+ }
+
/**
* Find the best editlog input stream to read from txid.
@@ -332,7 +351,7 @@ public class JournalSet implements JournalManager {
mapJournalsAndReportErrors(new JournalClosure() {
@Override
public void apply(JournalAndStream jas) throws IOException {
- jas.close();
+ jas.closeStream();
}
}, "close");
}
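Because JournalManager now extends Closeable, callers can release a set of journals uniformly. The sketch below is a hypothetical holder class, not from the patch; it illustrates a best-effort close over several Closeable journals in the spirit of JournalSet.close() above, which delegates to mapJournalsAndReportErrors.

import java.io.Closeable;
import java.io.IOException;
import java.util.List;

class JournalHolder implements Closeable {
  private final List<Closeable> journals;

  JournalHolder(List<Closeable> journals) { this.journals = journals; }

  @Override
  public void close() throws IOException {
    IOException firstFailure = null;
    for (Closeable journal : journals) {
      try {
        journal.close();
      } catch (IOException ioe) {
        // best effort: remember the first failure, keep closing the rest
        if (firstFailure == null) firstFailure = ioe;
      }
    }
    if (firstFailure != null) throw firstFailure;
  }
}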
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
index 2aa1fba5bcd..4b59e509245 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeHttpServer.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.security.PrivilegedExceptionAction;
+import java.util.HashMap;
+import java.util.Map;
import javax.servlet.ServletContext;
@@ -107,8 +109,9 @@ public class NameNodeHttpServer {
//add SPNEGO authentication filter for webhdfs
final String name = "SPNEGO";
final String classname = AuthFilter.class.getName();
- final String pathSpec = "/" + WebHdfsFileSystem.PATH_PREFIX + "/*";
- defineFilter(webAppContext, name, classname, null,
+ final String pathSpec = WebHdfsFileSystem.PATH_PREFIX + "/*";
+ Map<String, String> params = getAuthFilterParams(conf);
+ defineFilter(webAppContext, name, classname, params,
new String[]{pathSpec});
LOG.info("Added filter '" + name + "' (class=" + classname + ")");
@@ -118,6 +121,28 @@ public class NameNodeHttpServer {
+ ";" + Param.class.getPackage().getName(), pathSpec);
}
}
+
+ private Map<String, String> getAuthFilterParams(Configuration conf)
+ throws IOException {
+ Map<String, String> params = new HashMap<String, String>();
+ String principalInConf = conf
+ .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY);
+ if (principalInConf != null && !principalInConf.isEmpty()) {
+ params
+ .put(
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+ SecurityUtil.getServerPrincipal(principalInConf,
+ infoHost));
+ }
+ String httpKeytab = conf
+ .get(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY);
+ if (httpKeytab != null && !httpKeytab.isEmpty()) {
+ params.put(
+ DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+ httpKeytab);
+ }
+ return params;
+ }
};
boolean certSSL = conf.getBoolean("dfs.https.enable", false);
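The SPNEGO filter parameters above are populated only when the corresponding keys are set in the configuration. A hedged configuration sketch follows; the principal and keytab values are placeholders, and _HOST expansion is performed by SecurityUtil.getServerPrincipal() inside getAuthFilterParams(), as shown in the diff.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;

public class WebHdfsSpnegoConfSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // placeholder principal; _HOST is replaced with the NameNode host name
    conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
        "HTTP/_HOST@EXAMPLE.COM");
    // placeholder keytab path
    conf.set(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
        "/etc/security/keytabs/spnego.service.keytab");
  }
}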
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
index 24f999e1708..4d7cfd8fa92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
@@ -32,6 +32,8 @@ import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.common.Util;
+import com.google.common.annotations.VisibleForTesting;
+
/**
*
* NameNodeResourceChecker provides a method -
@@ -91,15 +93,16 @@ public class NameNodeResourceChecker {
}
/**
- * Return true if disk space is available on all all the configured volumes.
+ * Return true if disk space is available on at least one of the configured
+ * volumes.
*
- * @return True if the configured amount of disk space is available on all
- * volumes, false otherwise.
+ * @return True if the configured amount of disk space is available on at
+ * least one volume, false otherwise.
* @throws IOException
*/
boolean hasAvailableDiskSpace()
throws IOException {
- return getVolumesLowOnSpace().size() == 0;
+ return getVolumesLowOnSpace().size() < volumes.size();
}
/**
@@ -127,4 +130,9 @@ public class NameNodeResourceChecker {
}
return lowVolumes;
}
+
+ @VisibleForTesting
+ void setVolumes(Map<String, DF> volumes) {
+ this.volumes = volumes;
+ }
}
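To make the revised semantics concrete, the checker now reports resources as available while at least one configured volume still has the required space. Below is a small self-contained sketch, with plain byte counts standing in for the DF probes used by NameNodeResourceChecker; names and thresholds are illustrative.

import java.util.HashMap;
import java.util.Map;

public class ResourceCheckSketch {
  // true if at least one volume still has the required space,
  // i.e. not every configured volume is low on space
  static boolean hasAvailableDiskSpace(Map<String, Long> availablePerVolume,
                                       long requiredBytes) {
    int lowVolumes = 0;
    for (long available : availablePerVolume.values()) {
      if (available < requiredBytes) {
        lowVolumes++;
      }
    }
    return lowVolumes < availablePerVolume.size();
  }

  public static void main(String[] args) {
    Map<String, Long> volumes = new HashMap<String, Long>();
    volumes.put("/data/1", 10L << 30); // 10 GB free
    volumes.put("/data/2", 1L << 20);  // 1 MB free
    // required 100 MB: one volume is low, one is not -> still available
    System.out.println(hasAvailableDiskSpace(volumes, 100L << 20));
  }
}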
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
index 6546b8fe06b..97fce223ee7 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
@@ -60,6 +60,8 @@ import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.UpgradeAction;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeProtocolServerSideTranslatorR23;
import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.IncorrectVersionException;
@@ -72,6 +74,7 @@ import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
@@ -119,8 +122,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
private final InetSocketAddress serviceRPCAddress;
/** The RPC server that listens to requests from clients */
- protected final RPC.Server server;
- protected final InetSocketAddress rpcAddress;
+ protected final RPC.Server clientRpcServer;
+ protected final InetSocketAddress clientRpcAddress;
public NameNodeRpcServer(Configuration conf, NameNode nn)
throws IOException {
@@ -132,15 +135,31 @@ class NameNodeRpcServer implements NamenodeProtocols {
conf.getInt(DFS_DATANODE_HANDLER_COUNT_KEY,
DFS_DATANODE_HANDLER_COUNT_DEFAULT);
InetSocketAddress socAddr = nn.getRpcServerAddress(conf);
-
+ ClientNamenodeProtocolServerSideTranslatorR23
+ clientProtocolServerTranslator =
+ new ClientNamenodeProtocolServerSideTranslatorR23(this);
+
InetSocketAddress dnSocketAddr = nn.getServiceRpcServerAddress(conf);
if (dnSocketAddr != null) {
int serviceHandlerCount =
conf.getInt(DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY,
DFS_NAMENODE_SERVICE_HANDLER_COUNT_DEFAULT);
- this.serviceRpcServer = RPC.getServer(NamenodeProtocols.class, this,
- dnSocketAddr.getHostName(), dnSocketAddr.getPort(), serviceHandlerCount,
+ // Add all the RPC protocols that the namenode implements
+ this.serviceRpcServer =
+ RPC.getServer(org.apache.hadoop.hdfs.protocolR23Compatible.
+ ClientNamenodeWireProtocol.class, clientProtocolServerTranslator,
+ dnSocketAddr.getHostName(), dnSocketAddr.getPort(),
+ serviceHandlerCount,
false, conf, namesystem.getDelegationTokenSecretManager());
+ this.serviceRpcServer.addProtocol(DatanodeProtocol.class, this);
+ this.serviceRpcServer.addProtocol(NamenodeProtocol.class, this);
+ this.serviceRpcServer.addProtocol(
+ RefreshAuthorizationPolicyProtocol.class, this);
+ this.serviceRpcServer.addProtocol(
+ RefreshUserMappingsProtocol.class, this);
+ this.serviceRpcServer.addProtocol(GetUserMappingsProtocol.class, this);
+ this.serviceRpcServer.addProtocol(HAServiceProtocol.class, this);
+
this.serviceRPCAddress = this.serviceRpcServer.getListenerAddress();
nn.setRpcServiceServerAddress(conf, serviceRPCAddress);
} else {
@@ -148,38 +167,40 @@ class NameNodeRpcServer implements NamenodeProtocols {
serviceRPCAddress = null;
}
// Add all the RPC protocols that the namenode implements
- this.server = RPC.getServer(ClientProtocol.class, this,
- socAddr.getHostName(), socAddr.getPort(),
- handlerCount, false, conf,
- namesystem.getDelegationTokenSecretManager());
- this.server.addProtocol(DatanodeProtocol.class, this);
- this.server.addProtocol(NamenodeProtocol.class, this);
- this.server.addProtocol(RefreshAuthorizationPolicyProtocol.class, this);
- this.server.addProtocol(RefreshUserMappingsProtocol.class, this);
- this.server.addProtocol(GetUserMappingsProtocol.class, this);
- this.server.addProtocol(HAServiceProtocol.class, this);
+ this.clientRpcServer = RPC.getServer(
+ org.apache.hadoop.hdfs.protocolR23Compatible.
+ ClientNamenodeWireProtocol.class,
+ clientProtocolServerTranslator, socAddr.getHostName(),
+ socAddr.getPort(), handlerCount, false, conf,
+ namesystem.getDelegationTokenSecretManager());
+ this.clientRpcServer.addProtocol(DatanodeProtocol.class, this);
+ this.clientRpcServer.addProtocol(NamenodeProtocol.class, this);
+ this.clientRpcServer.addProtocol(
+ RefreshAuthorizationPolicyProtocol.class, this);
+ this.clientRpcServer.addProtocol(RefreshUserMappingsProtocol.class, this);
+ this.clientRpcServer.addProtocol(GetUserMappingsProtocol.class, this);
// set service-level authorization security policy
if (serviceAuthEnabled =
conf.getBoolean(
CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
- this.server.refreshServiceAcl(conf, new HDFSPolicyProvider());
+ this.clientRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
if (this.serviceRpcServer != null) {
this.serviceRpcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
}
}
// The rpc-server port can be ephemeral... ensure we have the correct info
- this.rpcAddress = this.server.getListenerAddress();
- nn.setRpcServerAddress(conf, rpcAddress);
+ this.clientRpcAddress = this.clientRpcServer.getListenerAddress();
+ nn.setRpcServerAddress(conf, clientRpcAddress);
}
/**
* Actually start serving requests.
*/
void start() {
- server.start(); //start RPC server
+ clientRpcServer.start(); //start RPC server
if (serviceRpcServer != null) {
serviceRpcServer.start();
}
@@ -189,11 +210,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
* Wait until the RPC server has shut down.
*/
void join() throws InterruptedException {
- this.server.join();
+ this.clientRpcServer.join();
}
void stop() {
- if(server != null) server.stop();
+ if(clientRpcServer != null) clientRpcServer.stop();
if(serviceRpcServer != null) serviceRpcServer.stop();
}
@@ -202,7 +223,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
}
InetSocketAddress getRpcAddress() {
- return rpcAddress;
+ return clientRpcAddress;
}
@Override // VersionedProtocol
@@ -216,7 +237,8 @@ class NameNodeRpcServer implements NamenodeProtocols {
public long getProtocolVersion(String protocol,
long clientVersion) throws IOException {
if (protocol.equals(ClientProtocol.class.getName())) {
- return ClientProtocol.versionID;
+ throw new IOException("Old Namenode Client protocol is not supported:" +
+ protocol + "Switch your clientside to " + ClientNamenodeWireProtocol.class);
} else if (protocol.equals(DatanodeProtocol.class.getName())){
return DatanodeProtocol.versionID;
} else if (protocol.equals(NamenodeProtocol.class.getName())){
@@ -850,7 +872,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
if (nn.getFSImage().isUpgradeFinalized())
- return new DatanodeCommand.Finalize(poolId);
+ return new FinalizeCommand(poolId);
return null;
}
@@ -923,7 +945,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
throw new AuthorizationException("Service Level Authorization not enabled!");
}
- this.server.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
+ this.clientRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
if (this.serviceRpcServer != null) {
this.serviceRpcServer.refreshServiceAcl(new Configuration(), new HDFSPolicyProvider());
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
index 0c180c08d7d..d002fde1844 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/ConfiguredFailoverProxyProvider.java
@@ -62,7 +62,9 @@ public class ConfiguredFailoverProxyProvider implements FailoverProxyProvider,
AddressRpcProxyPair current = proxies.get(currentProxyIndex);
if (current.namenode == null) {
try {
- current.namenode = DFSUtil.createRPCNamenode(current.address, conf, ugi);
+ // TODO(HA): This will create a NN proxy with an underlying retry
+ // proxy. We don't want this.
+ current.namenode = DFSUtil.createNamenode(current.address, conf, ugi);
} catch (IOException e) {
LOG.error("Failed to create RPC proxy to NameNode", e);
throw new RuntimeException(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
index 8e0b6b091ee..46ea367cb92 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java
@@ -19,7 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode.web.resources;
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.io.InputStream;
import java.io.OutputStream;
import java.io.PrintStream;
import java.net.URI;
@@ -29,6 +28,7 @@ import java.util.EnumSet;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
+import javax.servlet.http.HttpServletResponse;
import javax.ws.rs.Consumes;
import javax.ws.rs.DELETE;
import javax.ws.rs.DefaultValue;
@@ -42,16 +42,20 @@ import javax.ws.rs.QueryParam;
import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
+import javax.ws.rs.core.Response.ResponseBuilder;
+import javax.ws.rs.core.Response.Status;
import javax.ws.rs.core.StreamingOutput;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.JspHelper;
@@ -64,8 +68,9 @@ import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
import org.apache.hadoop.hdfs.web.resources.DelegationParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
-import org.apache.hadoop.hdfs.web.resources.DstPathParam;
+import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@@ -84,7 +89,6 @@ import org.apache.hadoop.hdfs.web.resources.RenewerParam;
import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UriFsPathParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
@@ -100,6 +104,8 @@ import com.sun.jersey.spi.container.ResourceFilters;
public class NamenodeWebHdfsMethods {
public static final Log LOG = LogFactory.getLog(NamenodeWebHdfsMethods.class);
+ private static final UriFsPathParam ROOT = new UriFsPathParam("");
+
private static final ThreadLocal<String> REMOTE_ADDRESS = new ThreadLocal<String>();
/** @return the remote client address. */
@@ -109,6 +115,7 @@ public class NamenodeWebHdfsMethods {
private @Context ServletContext context;
private @Context HttpServletRequest request;
+ private @Context HttpServletResponse response;
private static DatanodeInfo chooseDatanode(final NameNode namenode,
final String path, final HttpOpParam.Op op, final long openOffset
@@ -118,6 +125,9 @@ public class NamenodeWebHdfsMethods {
|| op == PostOpParam.Op.APPEND) {
final NamenodeProtocols np = namenode.getRpcServer();
final HdfsFileStatus status = np.getFileInfo(path);
+ if (status == null) {
+ throw new FileNotFoundException("File " + path + " not found.");
+ }
final long len = status.getLen();
if (op == GetOpParam.Op.OPEN && (openOffset < 0L || openOffset >= len)) {
throw new IOException("Offset=" + openOffset + " out of the range [0, "
@@ -143,11 +153,11 @@ public class NamenodeWebHdfsMethods {
final NameNode namenode, final UserGroupInformation ugi,
final String renewer) throws IOException {
final Credentials c = DelegationTokenSecretManager.createCredentials(
- namenode, ugi, request.getUserPrincipal().getName());
+ namenode, ugi,
+ renewer != null? renewer: request.getUserPrincipal().getName());
final Token<? extends TokenIdentifier> t = c.getAllTokens().iterator().next();
- t.setService(new Text(SecurityUtil.buildDTServiceName(
- NameNode.getUri(namenode.getNameNodeAddress()),
- NameNode.DEFAULT_PORT)));
+ t.setKind(WebHdfsFileSystem.TOKEN_KIND);
+ SecurityUtil.setTokenService(t, namenode.getNameNodeAddress());
return t;
}
@@ -173,7 +183,7 @@ public class NamenodeWebHdfsMethods {
final String query = op.toQueryString()
+ '&' + new UserParam(ugi) + delegationQuery
+ Param.toSortedString("&", parameters);
- final String uripath = "/" + WebHdfsFileSystem.PATH_PREFIX + path;
+ final String uripath = WebHdfsFileSystem.PATH_PREFIX + path;
final URI uri = new URI("http", null, dn.getHostName(), dn.getInfoPort(),
uripath, query, null);
@@ -183,21 +193,19 @@ public class NamenodeWebHdfsMethods {
return uri;
}
- /** Handle HTTP PUT request. */
+ /** Handle HTTP PUT request for the root. */
@PUT
- @Path("{" + UriFsPathParam.NAME + ":.*}")
+ @Path("/")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_JSON})
- public Response put(
- final InputStream in,
+ public Response putRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
- @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
@QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
final PutOpParam op,
- @QueryParam(DstPathParam.NAME) @DefaultValue(DstPathParam.DEFAULT)
- final DstPathParam dstPath,
+ @QueryParam(DestinationParam.NAME) @DefaultValue(DestinationParam.DEFAULT)
+ final DestinationParam destination,
@QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT)
final OwnerParam owner,
@QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT)
@@ -217,16 +225,63 @@ public class NamenodeWebHdfsMethods {
@QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
final AccessTimeParam accessTime,
@QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
- final RenameOptionSetParam renameOptions
- ) throws IOException, URISyntaxException, InterruptedException {
+ final RenameOptionSetParam renameOptions,
+ @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
+ final TokenArgumentParam delegationTokenArgument
+ ) throws IOException, InterruptedException {
+ return put(ugi, delegation, ROOT, op, destination, owner, group,
+ permission, overwrite, bufferSize, replication, blockSize,
+ modificationTime, accessTime, renameOptions, delegationTokenArgument);
+ }
+
+ /** Handle HTTP PUT request. */
+ @PUT
+ @Path("{" + UriFsPathParam.NAME + ":.*}")
+ @Consumes({"*/*"})
+ @Produces({MediaType.APPLICATION_JSON})
+ public Response put(
+ @Context final UserGroupInformation ugi,
+ @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+ final DelegationParam delegation,
+ @PathParam(UriFsPathParam.NAME) final UriFsPathParam path,
+ @QueryParam(PutOpParam.NAME) @DefaultValue(PutOpParam.DEFAULT)
+ final PutOpParam op,
+ @QueryParam(DestinationParam.NAME) @DefaultValue(DestinationParam.DEFAULT)
+ final DestinationParam destination,
+ @QueryParam(OwnerParam.NAME) @DefaultValue(OwnerParam.DEFAULT)
+ final OwnerParam owner,
+ @QueryParam(GroupParam.NAME) @DefaultValue(GroupParam.DEFAULT)
+ final GroupParam group,
+ @QueryParam(PermissionParam.NAME) @DefaultValue(PermissionParam.DEFAULT)
+ final PermissionParam permission,
+ @QueryParam(OverwriteParam.NAME) @DefaultValue(OverwriteParam.DEFAULT)
+ final OverwriteParam overwrite,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize,
+ @QueryParam(ReplicationParam.NAME) @DefaultValue(ReplicationParam.DEFAULT)
+ final ReplicationParam replication,
+ @QueryParam(BlockSizeParam.NAME) @DefaultValue(BlockSizeParam.DEFAULT)
+ final BlockSizeParam blockSize,
+ @QueryParam(ModificationTimeParam.NAME) @DefaultValue(ModificationTimeParam.DEFAULT)
+ final ModificationTimeParam modificationTime,
+ @QueryParam(AccessTimeParam.NAME) @DefaultValue(AccessTimeParam.DEFAULT)
+ final AccessTimeParam accessTime,
+ @QueryParam(RenameOptionSetParam.NAME) @DefaultValue(RenameOptionSetParam.DEFAULT)
+ final RenameOptionSetParam renameOptions,
+ @QueryParam(TokenArgumentParam.NAME) @DefaultValue(TokenArgumentParam.DEFAULT)
+ final TokenArgumentParam delegationTokenArgument
+ ) throws IOException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
- + Param.toSortedString(", ", dstPath, owner, group, permission,
+ + Param.toSortedString(", ", destination, owner, group, permission,
overwrite, bufferSize, replication, blockSize,
modificationTime, accessTime, renameOptions));
}
+ //clear content type
+ response.setContentType(null);
+
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
@@ -234,6 +289,7 @@ public class NamenodeWebHdfsMethods {
try {
final String fullpath = path.getAbsolutePath();
+ final Configuration conf = (Configuration)context.getAttribute(JspHelper.CURRENT_CONF);
final NameNode namenode = (NameNode)context.getAttribute("name.node");
final NamenodeProtocols np = namenode.getRpcServer();
@@ -255,24 +311,28 @@ public class NamenodeWebHdfsMethods {
{
final EnumSet<Options.Rename> s = renameOptions.getValue();
if (s.isEmpty()) {
- @SuppressWarnings("deprecation")
- final boolean b = np.rename(fullpath, dstPath.getValue());
+ final boolean b = np.rename(fullpath, destination.getValue());
final String js = JsonUtil.toJsonString("boolean", b);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
} else {
- np.rename2(fullpath, dstPath.getValue(),
+ np.rename2(fullpath, destination.getValue(),
s.toArray(new Options.Rename[s.size()]));
return Response.ok().type(MediaType.APPLICATION_JSON).build();
}
}
case SETREPLICATION:
{
- final boolean b = np.setReplication(fullpath, replication.getValue());
+ final boolean b = np.setReplication(fullpath, replication.getValue(conf));
final String js = JsonUtil.toJsonString("boolean", b);
- return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ final ResponseBuilder r = b? Response.ok(): Response.status(Status.FORBIDDEN);
+ return r.entity(js).type(MediaType.APPLICATION_JSON).build();
}
case SETOWNER:
{
+ if (owner.getValue() == null && group.getValue() == null) {
+ throw new IllegalArgumentException("Both owner and group are empty.");
+ }
+
np.setOwner(fullpath, owner.getValue(), group.getValue());
return Response.ok().type(MediaType.APPLICATION_JSON).build();
}
@@ -286,6 +346,21 @@ public class NamenodeWebHdfsMethods {
np.setTimes(fullpath, modificationTime.getValue(), accessTime.getValue());
return Response.ok().type(MediaType.APPLICATION_JSON).build();
}
+ case RENEWDELEGATIONTOKEN:
+ {
+ final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
+ token.decodeFromUrlString(delegationTokenArgument.getValue());
+ final long expiryTime = np.renewDelegationToken(token);
+ final String js = JsonUtil.toJsonString("long", expiryTime);
+ return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
+ }
+ case CANCELDELEGATIONTOKEN:
+ {
+ final Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>();
+ token.decodeFromUrlString(delegationTokenArgument.getValue());
+ np.cancelDelegationToken(token);
+ return Response.ok().type(MediaType.APPLICATION_JSON).build();
+ }
default:
throw new UnsupportedOperationException(op + " is not supported");
}
@@ -297,13 +372,29 @@ public class NamenodeWebHdfsMethods {
});
}
+ /** Handle HTTP POST request for the root. */
+ @POST
+ @Path("/")
+ @Consumes({"*/*"})
+ @Produces({MediaType.APPLICATION_JSON})
+ public Response postRoot(
+ @Context final UserGroupInformation ugi,
+ @QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
+ final DelegationParam delegation,
+ @QueryParam(PostOpParam.NAME) @DefaultValue(PostOpParam.DEFAULT)
+ final PostOpParam op,
+ @QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
+ final BufferSizeParam bufferSize
+ ) throws IOException, InterruptedException {
+ return post(ugi, delegation, ROOT, op, bufferSize);
+ }
+
/** Handle HTTP POST request. */
@POST
@Path("{" + UriFsPathParam.NAME + ":.*}")
@Consumes({"*/*"})
@Produces({MediaType.APPLICATION_JSON})
public Response post(
- final InputStream in,
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@@ -312,13 +403,16 @@ public class NamenodeWebHdfsMethods {
final PostOpParam op,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
- ) throws IOException, URISyntaxException, InterruptedException {
+ ) throws IOException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
+ Param.toSortedString(", ", bufferSize));
}
+ //clear content type
+ response.setContentType(null);
+
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException, URISyntaxException {
@@ -346,13 +440,11 @@ public class NamenodeWebHdfsMethods {
});
}
- private static final UriFsPathParam ROOT = new UriFsPathParam("");
-
/** Handle HTTP GET request for the root. */
@GET
@Path("/")
@Produces({MediaType.APPLICATION_OCTET_STREAM, MediaType.APPLICATION_JSON})
- public Response root(
+ public Response getRoot(
@Context final UserGroupInformation ugi,
@QueryParam(DelegationParam.NAME) @DefaultValue(DelegationParam.DEFAULT)
final DelegationParam delegation,
@@ -389,13 +481,15 @@ public class NamenodeWebHdfsMethods {
final RenewerParam renewer,
@QueryParam(BufferSizeParam.NAME) @DefaultValue(BufferSizeParam.DEFAULT)
final BufferSizeParam bufferSize
- ) throws IOException, URISyntaxException, InterruptedException {
+ ) throws IOException, InterruptedException {
if (LOG.isTraceEnabled()) {
LOG.trace(op + ": " + path + ", ugi=" + ugi
+ Param.toSortedString(", ", offset, length, renewer, bufferSize));
}
+ //clear content type
+ response.setContentType(null);
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
@@ -419,14 +513,18 @@ public class NamenodeWebHdfsMethods {
final long offsetValue = offset.getValue();
final Long lengthValue = length.getValue();
final LocatedBlocks locatedblocks = np.getBlockLocations(fullpath,
- offsetValue, lengthValue != null? lengthValue: offsetValue + 1);
+ offsetValue, lengthValue != null? lengthValue: Long.MAX_VALUE);
final String js = JsonUtil.toJsonString(locatedblocks);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case GETFILESTATUS:
{
final HdfsFileStatus status = np.getFileInfo(fullpath);
- final String js = JsonUtil.toJsonString(status);
+ if (status == null) {
+ throw new FileNotFoundException("File does not exist: " + fullpath);
+ }
+
+ final String js = JsonUtil.toJsonString(status, true);
return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
}
case LISTSTATUS:
@@ -482,33 +580,49 @@ public class NamenodeWebHdfsMethods {
@Override
public void write(final OutputStream outstream) throws IOException {
final PrintStream out = new PrintStream(outstream);
- out.println("{\"" + HdfsFileStatus[].class.getSimpleName() + "\":[");
+ out.println("{\"" + HdfsFileStatus.class.getSimpleName() + "es\":{\""
+ + HdfsFileStatus.class.getSimpleName() + "\":[");
final HdfsFileStatus[] partial = first.getPartialListing();
if (partial.length > 0) {
- out.print(JsonUtil.toJsonString(partial[0]));
+ out.print(JsonUtil.toJsonString(partial[0], false));
}
for(int i = 1; i < partial.length; i++) {
out.println(',');
- out.print(JsonUtil.toJsonString(partial[i]));
+ out.print(JsonUtil.toJsonString(partial[i], false));
}
for(DirectoryListing curr = first; curr.hasMore(); ) {
curr = getDirectoryListing(np, p, curr.getLastName());
for(HdfsFileStatus s : curr.getPartialListing()) {
out.println(',');
- out.print(JsonUtil.toJsonString(s));
+ out.print(JsonUtil.toJsonString(s, false));
}
}
- out.println("]}");
+ out.println();
+ out.println("]}}");
}
};
}
+ /** Handle HTTP DELETE request for the root. */
+ @DELETE
+ @Path("/")
+ @Produces(MediaType.APPLICATION_JSON)
+ public Response deleteRoot(
+ @Context final UserGroupInformation ugi,
+ @QueryParam(DeleteOpParam.NAME) @DefaultValue(DeleteOpParam.DEFAULT)
+ final DeleteOpParam op,
+ @QueryParam(RecursiveParam.NAME) @DefaultValue(RecursiveParam.DEFAULT)
+ final RecursiveParam recursive
+ ) throws IOException, InterruptedException {
+ return delete(ugi, ROOT, op, recursive);
+ }
+
/** Handle HTTP DELETE request. */
@DELETE
- @Path("{path:.*}")
+ @Path("{" + UriFsPathParam.NAME + ":.*}")
@Produces(MediaType.APPLICATION_JSON)
public Response delete(
@Context final UserGroupInformation ugi,
@@ -524,6 +638,9 @@ public class NamenodeWebHdfsMethods {
+ Param.toSortedString(", ", recursive));
}
+ //clear content type
+ response.setContentType(null);
+
return ugi.doAs(new PrivilegedExceptionAction<Response>() {
@Override
public Response run() throws IOException {
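
The LISTSTATUS writer above now wraps the status array in an outer object keyed by the class name plus "es". A minimal, self-contained sketch (not part of the patch; class and variable names are illustrative) that reproduces the envelope for an empty listing:

    import java.io.PrintStream;

    public class ListStatusEnvelopeSketch {
      public static void main(String[] args) {
        PrintStream out = System.out;
        // Same strings the StreamingOutput above prints, with no entries.
        out.println("{\"HdfsFileStatuses\":{\"HdfsFileStatus\":[");
        // ...one JSON object per HdfsFileStatus would go here, comma-separated...
        out.println();
        out.println("]}}");
      }
    }
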
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
index d236da36aec..cfbfb0a5360 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java
@@ -67,7 +67,6 @@ public class BlockCommand extends DatanodeCommand {
public BlockCommand(int action, String poolId,
List<BlockTargetPair> blocktargetlist) {
super(action);
-
this.poolId = poolId;
blocks = new Block[blocktargetlist.size()];
targets = new DatanodeInfo[blocks.length][];
@@ -85,12 +84,21 @@ public class BlockCommand extends DatanodeCommand {
* @param blocks blocks related to the action
*/
public BlockCommand(int action, String poolId, Block blocks[]) {
+ this(action, poolId, blocks, EMPTY_TARGET);
+ }
+
+ /**
+ * Create BlockCommand for the given action
+ * @param blocks blocks related to the action
+ */
+ public BlockCommand(int action, String poolId, Block[] blocks,
+ DatanodeInfo[][] targets) {
super(action);
this.poolId = poolId;
this.blocks = blocks;
- this.targets = EMPTY_TARGET;
+ this.targets = targets;
}
-
+
public String getBlockPoolId() {
return poolId;
}
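
For reference, a hedged usage sketch of the new four-argument constructor (not part of the patch; the action value and the empty arrays are placeholders):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;

    public class BlockCommandSketch {
      // Builds a transfer command with explicit (here empty) per-block targets.
      static BlockCommand emptyTransferCommand(String poolId) {
        Block[] blocks = new Block[0];
        DatanodeInfo[][] targets = new DatanodeInfo[0][];
        return new BlockCommand(DatanodeProtocol.DNA_TRANSFER, poolId, blocks, targets);
      }
    }
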
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
index 992deb88028..0c2e55e6933 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/BlockRecoveryCommand.java
@@ -117,8 +117,12 @@ public class BlockRecoveryCommand extends DatanodeCommand {
* the specified capacity for recovering blocks.
*/
public BlockRecoveryCommand(int capacity) {
+ this(new ArrayList<RecoveringBlock>(capacity));
+ }
+
+ public BlockRecoveryCommand(Collection<RecoveringBlock> blocks) {
super(DatanodeProtocol.DNA_RECOVERBLOCK);
- recoveringBlocks = new ArrayList<RecoveringBlock>(capacity);
+ recoveringBlocks = blocks;
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
index 52396d2408f..9c6950f2174 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java
@@ -17,17 +17,9 @@
*/
package org.apache.hadoop.hdfs.server.protocol;
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
+import org.apache.avro.reflect.Union;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.io.WritableFactory;
-import org.apache.hadoop.io.WritableFactories;
-import org.apache.hadoop.io.WritableUtils;
-import org.apache.avro.reflect.Union;
/**
* Base class for data-node command.
@@ -36,55 +28,13 @@ import org.apache.avro.reflect.Union;
// Declare subclasses for Avro's denormalized representation
@Union({Void.class,
- DatanodeCommand.Register.class, DatanodeCommand.Finalize.class,
+ RegisterCommand.class, FinalizeCommand.class,
BlockCommand.class, UpgradeCommand.class,
BlockRecoveryCommand.class, KeyUpdateCommand.class})
@InterfaceAudience.Private
@InterfaceStability.Evolving
public abstract class DatanodeCommand extends ServerCommand {
- static class Register extends DatanodeCommand {
- private Register() {super(DatanodeProtocol.DNA_REGISTER);}
- public void readFields(DataInput in) {}
- public void write(DataOutput out) {}
- }
-
- public static class Finalize extends DatanodeCommand {
- String blockPoolId;
- private Finalize() {
- super(DatanodeProtocol.DNA_FINALIZE);
- }
-
- public Finalize(String bpid) {
- super(DatanodeProtocol.DNA_FINALIZE);
- blockPoolId = bpid;
- }
-
- public String getBlockPoolId() {
- return blockPoolId;
- }
-
- public void readFields(DataInput in) throws IOException {
- blockPoolId = WritableUtils.readString(in);
- }
- public void write(DataOutput out) throws IOException {
- WritableUtils.writeString(out, blockPoolId);
- }
- }
-
- static { // register a ctor
- WritableFactories.setFactory(Register.class,
- new WritableFactory() {
- public Writable newInstance() {return new Register();}
- });
- WritableFactories.setFactory(Finalize.class,
- new WritableFactory() {
- public Writable newInstance() {return new Finalize();}
- });
- }
-
- public static final DatanodeCommand REGISTER = new Register();
-
public DatanodeCommand() {
super();
}
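
Since the Register/Finalize inner classes are removed here, call sites migrate to the top-level command classes introduced later in this patch. A small before/after sketch (illustrative only):

    import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
    import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
    import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;

    public class CommandMigrationSketch {
      // Previously DatanodeCommand.REGISTER; the constant now lives on RegisterCommand.
      static DatanodeCommand registerCommand() {
        return RegisterCommand.REGISTER;
      }
      // Previously new DatanodeCommand.Finalize(bpid); now a top-level class.
      static DatanodeCommand finalizeCommand(String blockPoolId) {
        return new FinalizeCommand(blockPoolId);
      }
    }
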
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
index 9fb509109ee..28f54e86eee 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java
@@ -22,10 +22,11 @@ import java.io.*;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@@ -45,7 +46,14 @@ import org.apache.avro.reflect.Nullable;
@InterfaceAudience.Private
public interface DatanodeProtocol extends VersionedProtocol {
/**
- * 28: Add Balancer Bandwidth Command protocol.
+ * This class is used by both the Datanode (client) and the Namenode (server)
+ * to insulate them from the protocol serialization.
+ *
+ * If you are adding/changing DN's interface then you need to
+ * change both this class and ALSO
+ * {@link DatanodeWireProtocol}.
+ * These changes need to be done in a compatible fashion as described in
+ * {@link ClientNamenodeWireProtocol}
*/
public static final long versionID = 28L;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
index 5dc8825e26b..c0fea64089a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/DatanodeRegistration.java
@@ -63,9 +63,14 @@ implements Writable, NodeRegistration {
* Create DatanodeRegistration
*/
public DatanodeRegistration(String nodeName) {
+ this(nodeName, new StorageInfo(), new ExportedBlockKeys());
+ }
+
+ public DatanodeRegistration(String nodeName, StorageInfo info,
+ ExportedBlockKeys keys) {
super(nodeName);
- this.storageInfo = new StorageInfo();
- this.exportedKeys = new ExportedBlockKeys();
+ this.storageInfo = info;
+ this.exportedKeys = keys;
}
public void setStorageInfo(StorageInfo storage) {
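
A brief sketch of the new three-argument constructor; the node name below is caller-supplied, and the no-arg StorageInfo/ExportedBlockKeys values mirror what the one-argument constructor now delegates to:

    import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
    import org.apache.hadoop.hdfs.server.common.StorageInfo;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;

    public class DatanodeRegistrationSketch {
      static DatanodeRegistration registrationFor(String nodeName) {
        // Equivalent to new DatanodeRegistration(nodeName) after this change.
        return new DatanodeRegistration(nodeName, new StorageInfo(),
            new ExportedBlockKeys());
      }
    }
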
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
new file mode 100644
index 00000000000..3bc8b117c2c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/FinalizeCommand.java
@@ -0,0 +1,68 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
+
+/**
+ * A FinalizeCommand is an instruction from the namenode to a datanode to finalize the previous upgrade of the given block pool.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class FinalizeCommand extends DatanodeCommand {
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(FinalizeCommand.class, new WritableFactory() {
+ public Writable newInstance() {
+ return new FinalizeCommand();
+ }
+ });
+ }
+
+ String blockPoolId;
+ private FinalizeCommand() {
+ super(DatanodeProtocol.DNA_FINALIZE);
+ }
+
+ public FinalizeCommand(String bpid) {
+ super(DatanodeProtocol.DNA_FINALIZE);
+ blockPoolId = bpid;
+ }
+
+ public String getBlockPoolId() {
+ return blockPoolId;
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ blockPoolId = WritableUtils.readString(in);
+ }
+ public void write(DataOutput out) throws IOException {
+ WritableUtils.writeString(out, blockPoolId);
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
index 1c62a605957..7d4808807eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/InterDatanodeProtocol.java
@@ -25,7 +25,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeWireProtocol;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@@ -39,6 +41,23 @@ public interface InterDatanodeProtocol extends VersionedProtocol {
public static final Log LOG = LogFactory.getLog(InterDatanodeProtocol.class);
/**
+ * Until version 9, this class InterDatanodeProtocol served as both
+ * the interface to the DN AND the RPC protocol used to communicate with the
+ * DN.
+ *
+ * Post version 6L (release 23 of Hadoop), the protocol is implemented in
+ * {@literal ../protocolR23Compatible/InterDatanodeWireProtocol}
+ *
+ * This class is used by the DN to insulate it from the protocol
+ * serialization.
+ *
+ * If you are adding/changing the DN's interface then you need to
+ * change both this class and ALSO
+ * {@link InterDatanodeWireProtocol}.
+ * These changes need to be done in a compatible fashion as described in
+ * {@link ClientNamenodeWireProtocol}.
+ *
+ * The log of historical changes can be retrieved from svn.
* 6: Add block pool ID to Block
*/
public static final long versionID = 6L;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
index 224208d7a3f..86719bc60fa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/JournalProtocol.java
@@ -21,6 +21,8 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolR23Compatible.JournalWireProtocol;
import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.KerberosInfo;
@@ -33,6 +35,17 @@ import org.apache.hadoop.security.KerberosInfo;
clientPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
@InterfaceAudience.Private
public interface JournalProtocol extends VersionedProtocol {
+ /**
+ *
+ * This class is used by both the Namenode (client) and BackupNode (server)
+ * to insulate from the protocol serialization.
+ *
+ * If you are adding/changing the journal protocol then you need to
+ * change both this class and ALSO
+ * {@link JournalWireProtocol}.
+ * These changes need to be done in a compatible fashion as described in
+ * {@link ClientNamenodeWireProtocol}
+ */
public static final long versionID = 1L;
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
index a58d0d086e4..48de14c657f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocol.java
@@ -38,9 +38,21 @@ import org.apache.hadoop.security.KerberosInfo;
@InterfaceAudience.Private
public interface NamenodeProtocol extends VersionedProtocol {
/**
- * Compared to the previous version the following changes have been introduced:
- * (Only the latest change is reflected.
- * The log of historical changes can be retrieved from the svn).
+ * Until version 6L, this class served as both
+ * the client interface to the NN AND the RPC protocol used to
+ * communicate with the NN.
+ *
+ * Post version 70 (release 23 of Hadoop), the protocol is implemented in
+ * {@literal ../protocolR23Compatible/ClientNamenodeWireProtocol}
+ *
+ * This class is used by both the users of this protocol (e.g. the Balancer
+ * and the SecondaryNameNode) and the NN server side to insulate them from the protocol serialization.
+ *
+ * If you are adding/changing NN's interface then you need to
+ * change both this class and ALSO
+ * {@link org.apache.hadoop.hdfs.protocolR23Compatible.NamenodeWireProtocol}.
+ * These changes need to be done in a compatible fashion as described in
+ * {@link org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol}
*
* 6: Switch to txid-based file naming for image and edits
*/
@@ -62,7 +74,7 @@ public interface NamenodeProtocol extends VersionedProtocol {
* @param datanode a data node
* @param size requested size
* @return a list of blocks & their locations
- * @throws RemoteException if size is less than or equal to 0 or
+ * @throws IOException if size is less than or equal to 0 or
datanode does not exist
*/
public BlocksWithLocations getBlocks(DatanodeInfo datanode, long size)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
index aa98ab19b60..2ee4d40ef45 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamenodeRegistration.java
@@ -52,10 +52,9 @@ implements NodeRegistration {
String httpAddress,
StorageInfo storageInfo,
NamenodeRole role) {
- super();
+ super(storageInfo);
this.rpcAddress = address;
this.httpAddress = httpAddress;
- this.setStorageInfo(storageInfo);
this.role = role;
}
@@ -64,6 +63,10 @@ implements NodeRegistration {
return rpcAddress;
}
+ public String getHttpAddress() {
+ return httpAddress;
+ }
+
@Override // NodeRegistration
public String getRegistrationID() {
return Storage.getRegistrationID(this);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
new file mode 100644
index 00000000000..05843475f65
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/RegisterCommand.java
@@ -0,0 +1,57 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocol;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * A RegisterCommand is an instruction to a datanode to register with the namenode.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RegisterCommand extends DatanodeCommand {
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(RegisterCommand.class, new WritableFactory() {
+ public Writable newInstance() {
+ return new RegisterCommand();
+ }
+ });
+ }
+
+ public static final DatanodeCommand REGISTER = new RegisterCommand();
+
+ public RegisterCommand() {
+ super(DatanodeProtocol.DNA_REGISTER);
+ }
+
+ @Override
+ public void readFields(DataInput in) { }
+
+ @Override
+ public void write(DataOutput out) { }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BalancerBandwidthCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BalancerBandwidthCommandWritable.java
new file mode 100644
index 00000000000..d6a2b0648f8
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BalancerBandwidthCommandWritable.java
@@ -0,0 +1,107 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * Balancer bandwidth command instructs each datanode to change its value for
+ * the max amount of network bandwidth it may use during the block balancing
+ * operation.
+ *
+ * The Balancer Bandwidth Command contains the new bandwidth value as its
+ * payload. The bandwidth value is in bytes per second.
+ */
+public class BalancerBandwidthCommandWritable extends DatanodeCommandWritable {
+ private final static long BBC_DEFAULTBANDWIDTH = 0L;
+
+ private long bandwidth;
+
+ /**
+ * Balancer Bandwidth Command constructor. Sets bandwidth to 0.
+ */
+ BalancerBandwidthCommandWritable() {
+ this(BBC_DEFAULTBANDWIDTH);
+ }
+
+ /**
+ * Balancer Bandwidth Command constructor.
+ * @param bandwidth Balancer bandwidth in bytes per second.
+ */
+ public BalancerBandwidthCommandWritable(long bandwidth) {
+ super(DatanodeWireProtocol.DNA_BALANCERBANDWIDTHUPDATE);
+ this.bandwidth = bandwidth;
+ }
+
+ /**
+ * Get current value of the max balancer bandwidth in bytes per second.
+ * @return the balancer bandwidth in bytes per second for this datanode.
+ */
+ public long getBalancerBandwidthValue() {
+ return this.bandwidth;
+ }
+
+ // ///////////////////////////////////////////////
+ // Writable
+ // ///////////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(BalancerBandwidthCommandWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new BalancerBandwidthCommandWritable();
+ }
+ });
+ }
+
+ /**
+ * Writes the bandwidth payload to the Balancer Bandwidth Command packet.
+ * @param out DataOutput stream used for writing commands to the datanode.
+ * @throws IOException
+ */
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ out.writeLong(this.bandwidth);
+ }
+
+ /**
+ * Reads the bandwidth payload from the Balancer Bandwidth Command packet.
+ * @param in DataInput stream used for reading commands sent to the datanode.
+ * @throws IOException
+ */
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ this.bandwidth = in.readLong();
+ }
+
+ @Override
+ public DatanodeCommand convert() {
+ return new BalancerBandwidthCommand(bandwidth);
+ }
+
+ public static DatanodeCommandWritable convert(BalancerBandwidthCommand cmd) {
+ return new BalancerBandwidthCommandWritable(cmd.getBalancerBandwidthValue());
+ }
+}
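
A hedged round-trip sketch of how this writable converts to and from the internal command type (not part of the patch; the bandwidth value is arbitrary):

    import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
    import org.apache.hadoop.hdfs.server.protocolR23Compatible.BalancerBandwidthCommandWritable;
    import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeCommandWritable;

    public class BandwidthRoundTripSketch {
      static DatanodeCommand roundTrip() {
        BalancerBandwidthCommand cmd = new BalancerBandwidthCommand(1024 * 1024);
        DatanodeCommandWritable wire = BalancerBandwidthCommandWritable.convert(cmd);
        return wire.convert(); // back to the internal (non-wire) command
      }
    }
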
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockCommandWritable.java
new file mode 100644
index 00000000000..990b235c9fe
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockCommandWritable.java
@@ -0,0 +1,147 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocolR23Compatible.BlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeInfoWritable;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTargetPair;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/****************************************************
+ * A BlockCommand is an instruction to a datanode regarding some blocks under
+ * its control. It tells the DataNode to either invalidate a set of indicated
+ * blocks, or to copy a set of indicated blocks to another DataNode.
+ *
+ ****************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BlockCommandWritable extends DatanodeCommandWritable {
+
+ /**
+ * This constant is used to indicate that the block deletion does not need
+ * explicit ACK from the datanode. When a block is put into the list of blocks
+ * to be deleted, its size is set to this constant. We assume that no block
+ * would actually have this size. Otherwise, we would miss ACKs for blocks
+ * with such a size. A positive number is used for compatibility reasons.
+ */
+ public static final long NO_ACK = Long.MAX_VALUE;
+
+ String poolId;
+ BlockWritable blocks[];
+ DatanodeInfoWritable targets[][];
+
+ public BlockCommandWritable() {
+ }
+
+ /**
+ * Create BlockCommand for the given action
+ *
+ * @param blocks blocks related to the action
+ */
+ public BlockCommandWritable(int action, String poolId, BlockWritable[] blocks,
+ DatanodeInfoWritable[][] targets) {
+ super(action);
+ this.poolId = poolId;
+ this.blocks = blocks;
+ this.targets = targets;
+ }
+
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(BlockCommandWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new BlockCommandWritable();
+ }
+ });
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ Text.writeString(out, poolId);
+ out.writeInt(blocks.length);
+ for (int i = 0; i < blocks.length; i++) {
+ blocks[i].write(out);
+ }
+ out.writeInt(targets.length);
+ for (int i = 0; i < targets.length; i++) {
+ out.writeInt(targets[i].length);
+ for (int j = 0; j < targets[i].length; j++) {
+ targets[i][j].write(out);
+ }
+ }
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ this.poolId = Text.readString(in);
+ this.blocks = new BlockWritable[in.readInt()];
+ for (int i = 0; i < blocks.length; i++) {
+ blocks[i] = new BlockWritable();
+ blocks[i].readFields(in);
+ }
+
+ this.targets = new DatanodeInfoWritable[in.readInt()][];
+ for (int i = 0; i < targets.length; i++) {
+ this.targets[i] = new DatanodeInfoWritable[in.readInt()];
+ for (int j = 0; j < targets[i].length; j++) {
+ targets[i][j] = new DatanodeInfoWritable();
+ targets[i][j].readFields(in);
+ }
+ }
+ }
+
+ @Override
+ public BlockCommand convert() {
+ DatanodeInfo[][] dinfo = new DatanodeInfo[targets.length][];
+ for (int i = 0; i < targets.length; i++) {
+ dinfo[i] = DatanodeInfoWritable.convertDatanodeInfo(targets[i]);
+ }
+ return new BlockCommand(getAction(), poolId, BlockWritable.convert(blocks),
+ dinfo);
+ }
+
+ public static BlockCommandWritable convert(BlockCommand cmd) {
+ if (cmd == null) return null;
+ DatanodeInfo[][] targets = cmd.getTargets();
+ DatanodeInfoWritable[][] dinfo = new DatanodeInfoWritable[targets.length][];
+ for (int i = 0; i < targets.length; i++) {
+ dinfo[i] = DatanodeInfoWritable.convertDatanodeInfo(targets[i]);
+ }
+ return new BlockCommandWritable(cmd.getAction(), cmd.getBlockPoolId(),
+ BlockWritable.convert(cmd.getBlocks()), dinfo);
+ }
+}
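
Likewise, a sketch of the BlockCommand wire translation; empty block/target arrays keep it self-contained (placeholders, not from the patch):

    import org.apache.hadoop.hdfs.protocol.Block;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
    import org.apache.hadoop.hdfs.server.protocolR23Compatible.BlockCommandWritable;

    public class BlockCommandWireSketch {
      static BlockCommand roundTrip(String poolId) {
        BlockCommand cmd = new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, poolId,
            new Block[0], new DatanodeInfo[0][]);
        BlockCommandWritable wire = BlockCommandWritable.convert(cmd);
        return wire.convert(); // Block and DatanodeInfo arrays are translated both ways
      }
    }
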
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockRecoveryCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockRecoveryCommandWritable.java
new file mode 100644
index 00000000000..ef7a6dbb23c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/BlockRecoveryCommandWritable.java
@@ -0,0 +1,118 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * BlockRecoveryCommand is an instruction to a data-node to recover the
+ * specified blocks.
+ *
+ * The data-node that receives this command treats itself as a primary data-node
+ * in the recovery process.
+ *
+ * Block recovery is identified by a recoveryId, which is also the new
+ * generation stamp, which the block will have after the recovery succeeds.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class BlockRecoveryCommandWritable extends DatanodeCommandWritable {
+ Collection<RecoveringBlockWritable> recoveringBlocks;
+
+ /**
+ * Create empty BlockRecoveryCommand.
+ */
+ public BlockRecoveryCommandWritable() { }
+
+ /**
+ * Create BlockRecoveryCommand with the specified capacity for recovering
+ * blocks.
+ */
+ public BlockRecoveryCommandWritable(int capacity) {
+ this(new ArrayList<RecoveringBlockWritable>(capacity));
+ }
+
+ public BlockRecoveryCommandWritable(Collection<RecoveringBlockWritable> blocks) {
+ super(DatanodeWireProtocol.DNA_RECOVERBLOCK);
+ recoveringBlocks = blocks;
+ }
+
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(BlockRecoveryCommandWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new BlockRecoveryCommandWritable();
+ }
+ });
+ }
+
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ out.writeInt(recoveringBlocks.size());
+ for (RecoveringBlockWritable block : recoveringBlocks) {
+ block.write(out);
+ }
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ int numBlocks = in.readInt();
+ recoveringBlocks = new ArrayList<RecoveringBlockWritable>(numBlocks);
+ for (int i = 0; i < numBlocks; i++) {
+ RecoveringBlockWritable b = new RecoveringBlockWritable();
+ b.readFields(in);
+ recoveringBlocks.add(b);
+ }
+ }
+
+ @Override
+ public DatanodeCommand convert() {
+ Collection<RecoveringBlock> blks =
+ new ArrayList<RecoveringBlock>(recoveringBlocks.size());
+ for (RecoveringBlockWritable b : recoveringBlocks) {
+ blks.add(b.convert());
+ }
+ return new BlockRecoveryCommand(blks);
+ }
+
+ public static BlockRecoveryCommandWritable convert(BlockRecoveryCommand cmd) {
+ if (cmd == null) return null;
+ Collection<RecoveringBlockWritable> blks =
+ new ArrayList<RecoveringBlockWritable>(cmd.getRecoveringBlocks().size());
+ for (RecoveringBlock b : cmd.getRecoveringBlocks()) {
+ blks.add(RecoveringBlockWritable.convert(b));
+ }
+ return new BlockRecoveryCommandWritable(blks);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeCommandHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeCommandHelper.java
new file mode 100644
index 00000000000..b2e585b99ab
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeCommandHelper.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
+import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+
+/**
+ * Class for translating DatanodeCommandWritable to and from DatanodeCommand.
+ */
+class DatanodeCommandHelper {
+ public static final Log LOG = LogFactory.getLog(DatanodeCommandHelper.class);
+
+ private DatanodeCommandHelper() {
+ /* Private constructor to prevent instantiation */
+ }
+
+ static DatanodeCommand convert(DatanodeCommandWritable cmd) {
+ return cmd.convert();
+ }
+
+ /**
+ * Given a subclass of {@link DatanodeCommand} return the corresponding
+ * writable type.
+ */
+ static DatanodeCommandWritable convert(DatanodeCommand cmd) {
+ switch (cmd.getAction()) {
+ case DatanodeProtocol.DNA_BALANCERBANDWIDTHUPDATE:
+ return BalancerBandwidthCommandWritable
+ .convert((BalancerBandwidthCommand) cmd);
+
+ case DatanodeProtocol.DNA_FINALIZE:
+ return FinalizeCommandWritable.convert((FinalizeCommand)cmd);
+ case DatanodeProtocol.DNA_ACCESSKEYUPDATE:
+ return KeyUpdateCommandWritable.convert((KeyUpdateCommand)cmd);
+ case DatanodeProtocol.DNA_REGISTER:
+ return RegisterCommandWritable.REGISTER;
+ case DatanodeProtocol.DNA_TRANSFER:
+ case DatanodeProtocol.DNA_INVALIDATE:
+ return BlockCommandWritable.convert((BlockCommand)cmd);
+ case UpgradeCommand.UC_ACTION_START_UPGRADE:
+ return UpgradeCommandWritable.convert((UpgradeCommand)cmd);
+ case DatanodeProtocol.DNA_RECOVERBLOCK:
+ return BlockRecoveryCommandWritable.convert((BlockRecoveryCommand)cmd);
+ default:
+ LOG.warn("Unknown DatanodeCommand action - " + cmd.getAction());
+ return null;
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeCommandWritable.java
new file mode 100644
index 00000000000..b3686402aec
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeCommandWritable.java
@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+
+/**
+ * Base class for data-node command.
+ * Issued by the name-node to notify data-nodes what should be done.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class DatanodeCommandWritable extends ServerCommandWritable {
+ public DatanodeCommandWritable() {
+ super();
+ }
+
+ DatanodeCommandWritable(int action) {
+ super(action);
+ }
+
+ /** Method to convert from writable type to internal type */
+ public abstract DatanodeCommand convert();
+
+ public static DatanodeCommandWritable[] convert(DatanodeCommand[] cmds) {
+ DatanodeCommandWritable[] ret = new DatanodeCommandWritable[cmds.length];
+ for (int i = 0; i < cmds.length; i++) {
+ ret[i] = DatanodeCommandHelper.convert(cmds[i]);
+ }
+ return ret;
+ }
+
+ public static DatanodeCommand[] convert(DatanodeCommandWritable[] cmds) {
+ if (cmds == null) return null;
+ DatanodeCommand[] ret = new DatanodeCommand[cmds.length];
+ for (int i = 0; i < cmds.length; i++) {
+ ret[i] = cmds[i].convert();
+ }
+ return ret;
+ }
+}
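
The array converters above are what the heartbeat path relies on; a sketch, assuming the RegisterCommandWritable/FinalizeCommandWritable classes added elsewhere in this patch, of converting a command array to its wire form and back:

    import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
    import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
    import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
    import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeCommandWritable;

    public class HeartbeatReplySketch {
      static DatanodeCommand[] roundTrip(String blockPoolId) {
        DatanodeCommand[] reply = new DatanodeCommand[] {
            RegisterCommand.REGISTER, new FinalizeCommand(blockPoolId) };
        DatanodeCommandWritable[] wire = DatanodeCommandWritable.convert(reply);
        return DatanodeCommandWritable.convert(wire); // dispatch via DatanodeCommandHelper
      }
    }
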
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolServerSideTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolServerSideTranslatorR23.java
new file mode 100644
index 00000000000..2c806afd449
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolServerSideTranslatorR23.java
@@ -0,0 +1,170 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeIDWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.LocatedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.NamespaceInfoWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+
+/**
+ * This class is used on the server side. Calls come across the wire for the
+ * protocol family of Release 23 onwards. This class translates the R23 data
+ * types to the native data types used inside the NN as specified in the generic
+ * DatanodeProtocol.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class DatanodeProtocolServerSideTranslatorR23 implements
+ DatanodeWireProtocol {
+ final private DatanodeProtocol server;
+
+ /**
+ * Constructor
+ * @param server - the NN server
+ * @throws IOException
+ */
+ public DatanodeProtocolServerSideTranslatorR23(DatanodeProtocol server)
+ throws IOException {
+ this.server = server;
+ }
+
+ /**
+ * The client side will redirect getProtocolSignature to
+ * getProtocolSignature2.
+ *
+ * However the RPC layer below on the Server side will call getProtocolVersion
+ * and possibly in the future getProtocolSignature. Hence we still implement
+ * it even though the end client's call will never reach here.
+ */
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ /**
+ * Don't forward this to the server. The protocol version and signature are
+ * those of {@link DatanodeProtocol}.
+ *
+ */
+ if (!protocol.equals(RPC.getProtocolName(DatanodeWireProtocol.class))) {
+ throw new IOException("Namenode Serverside implements " +
+ RPC.getProtocolName(DatanodeWireProtocol.class) +
+ ". The following requested protocol is unknown: " + protocol);
+ }
+
+ return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+ DatanodeWireProtocol.versionID, DatanodeWireProtocol.class);
+ }
+
+ @Override
+ public ProtocolSignatureWritable
+ getProtocolSignature2(
+ String protocol, long clientVersion, int clientMethodsHash)
+ throws IOException {
+ /**
+ * Don't forward this to the server. The protocol version and signature are
+ * those of {@link DatanodeProtocol}.
+ */
+ return ProtocolSignatureWritable.convert(
+ this.getProtocolSignature(protocol, clientVersion, clientMethodsHash));
+ }
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion)
+ throws IOException {
+ if (protocol.equals(RPC.getProtocolName(DatanodeWireProtocol.class))) {
+ return DatanodeWireProtocol.versionID;
+ }
+ throw new IOException("Namenode Serverside implements " +
+ RPC.getProtocolName(DatanodeWireProtocol.class) +
+ ". The following requested protocol is unknown: " + protocol);
+ }
+
+ @Override
+ public DatanodeRegistrationWritable registerDatanode(
+ DatanodeRegistrationWritable registration) throws IOException {
+ return DatanodeRegistrationWritable.convert(server
+ .registerDatanode(registration.convert()));
+ }
+
+ @Override
+ public DatanodeCommandWritable[] sendHeartbeat(
+ DatanodeRegistrationWritable registration, long capacity, long dfsUsed,
+ long remaining, long blockPoolUsed, int xmitsInProgress,
+ int xceiverCount, int failedVolumes) throws IOException {
+ return DatanodeCommandWritable.convert(server.sendHeartbeat(
+ registration.convert(), capacity, dfsUsed, remaining, blockPoolUsed,
+ xmitsInProgress, xceiverCount, failedVolumes));
+ }
+
+ @Override
+ public DatanodeCommandWritable blockReport(
+ DatanodeRegistrationWritable registration, String poolId, long[] blocks)
+ throws IOException {
+ return DatanodeCommandHelper.convert(server.blockReport(
+ registration.convert(), poolId, blocks));
+ }
+
+ @Override
+ public void blockReceivedAndDeleted(
+ DatanodeRegistrationWritable registration, String poolId,
+ ReceivedDeletedBlockInfoWritable[] receivedAndDeletedBlocks)
+ throws IOException {
+ server.blockReceivedAndDeleted(registration.convert(), poolId,
+ ReceivedDeletedBlockInfoWritable.convert(receivedAndDeletedBlocks));
+ }
+
+ @Override
+ public void errorReport(DatanodeRegistrationWritable registration,
+ int errorCode, String msg) throws IOException {
+ server.errorReport(registration.convert(), errorCode, msg);
+ }
+
+ @Override
+ public NamespaceInfoWritable versionRequest() throws IOException {
+ return NamespaceInfoWritable.convert(server.versionRequest());
+ }
+
+ @Override
+ public UpgradeCommandWritable processUpgradeCommand(
+ UpgradeCommandWritable comm) throws IOException {
+ return UpgradeCommandWritable.convert(server.processUpgradeCommand(comm.convert()));
+ }
+
+ @Override
+ public void reportBadBlocks(LocatedBlockWritable[] blocks) throws IOException {
+ server.reportBadBlocks(LocatedBlockWritable.convertLocatedBlock(blocks));
+ }
+
+ @Override
+ public void commitBlockSynchronization(ExtendedBlockWritable block,
+ long newgenerationstamp, long newlength, boolean closeFile,
+ boolean deleteblock, DatanodeIDWritable[] newtargets) throws IOException {
+ server.commitBlockSynchronization(
+ ExtendedBlockWritable.convertExtendedBlock(block), newgenerationstamp,
+ newlength, closeFile, deleteblock,
+ DatanodeIDWritable.convertDatanodeID(newtargets));
+ }
+}
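
On the server side, the NameNode would expose DatanodeWireProtocol by wrapping its native DatanodeProtocol handler in this translator; a minimal wiring sketch (the RPC server setup itself is omitted):

    import java.io.IOException;
    import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
    import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeProtocolServerSideTranslatorR23;
    import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;

    public class WireServerWiringSketch {
      static DatanodeWireProtocol wrap(DatanodeProtocol nativeImpl) throws IOException {
        // R23 clients call DatanodeWireProtocol; the translator forwards to nativeImpl.
        return new DatanodeProtocolServerSideTranslatorR23(nativeImpl);
      }
    }
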
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolTranslatorR23.java
new file mode 100644
index 00000000000..1664940474b
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeProtocolTranslatorR23.java
@@ -0,0 +1,193 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.Closeable;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.concurrent.TimeUnit;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeIDWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.LocatedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * This class forwards DatanodeProtocol calls as RPC calls to the NN server
+ * while translating from the parameter types used in DatanodeProtocol to those
+ * used in protocolR23Compatible.*.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class DatanodeProtocolTranslatorR23 implements
+ DatanodeProtocol, Closeable {
+ final private DatanodeWireProtocol rpcProxy;
+
+ private static DatanodeWireProtocol createNamenode(
+ InetSocketAddress nameNodeAddr, Configuration conf,
+ UserGroupInformation ugi) throws IOException {
+ return RPC.getProxy(DatanodeWireProtocol.class,
+ DatanodeWireProtocol.versionID, nameNodeAddr, ugi, conf,
+ NetUtils.getSocketFactory(conf, DatanodeWireProtocol.class));
+ }
+
+ /** Create a {@link NameNode} proxy */
+ static DatanodeWireProtocol createNamenodeWithRetry(
+ DatanodeWireProtocol rpcNamenode) {
+ RetryPolicy createPolicy = RetryPolicies
+ .retryUpToMaximumCountWithFixedSleep(5,
+ HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+
+ Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
+ new HashMap<Class<? extends Exception>, RetryPolicy>();
+ remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
+ createPolicy);
+
+ Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
+ new HashMap<Class<? extends Exception>, RetryPolicy>();
+ exceptionToPolicyMap.put(RemoteException.class, RetryPolicies
+ .retryByRemoteException(RetryPolicies.TRY_ONCE_THEN_FAIL,
+ remoteExceptionToPolicyMap));
+ RetryPolicy methodPolicy = RetryPolicies.retryByException(
+ RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+ Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
+
+ methodNameToPolicyMap.put("create", methodPolicy);
+
+ return (DatanodeWireProtocol) RetryProxy.create(
+ DatanodeWireProtocol.class, rpcNamenode, methodNameToPolicyMap);
+ }
+
+ public DatanodeProtocolTranslatorR23(InetSocketAddress nameNodeAddr,
+ Configuration conf) throws IOException {
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ rpcProxy = createNamenodeWithRetry(createNamenode(nameNodeAddr, conf, ugi));
+ }
+
+ @Override
+ public void close() {
+ RPC.stopProxy(rpcProxy);
+ }
+
+ @Override
+ public long getProtocolVersion(String protocolName, long clientVersion)
+ throws IOException {
+ return rpcProxy.getProtocolVersion(protocolName, clientVersion);
+ }
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
+ protocol, clientVersion, clientMethodsHash));
+ }
+
+ @Override
+ public DatanodeRegistration registerDatanode(DatanodeRegistration registration)
+ throws IOException {
+ return rpcProxy.registerDatanode(
+ DatanodeRegistrationWritable.convert(registration)).convert();
+ }
+
+ @Override
+ public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
+ long capacity, long dfsUsed, long remaining, long blockPoolUsed,
+ int xmitsInProgress, int xceiverCount, int failedVolumes)
+ throws IOException {
+ return DatanodeCommandWritable.convert(rpcProxy.sendHeartbeat(
+ DatanodeRegistrationWritable.convert(registration), capacity,
+ dfsUsed, remaining, blockPoolUsed, xmitsInProgress, xceiverCount,
+ failedVolumes));
+ }
+
+ @Override
+ public DatanodeCommand blockReport(DatanodeRegistration registration,
+ String poolId, long[] blocks) throws IOException {
+ return rpcProxy.blockReport(
+ DatanodeRegistrationWritable.convert(registration), poolId, blocks)
+ .convert();
+ }
+
+ @Override
+ public void blockReceivedAndDeleted(DatanodeRegistration registration,
+ String poolId, ReceivedDeletedBlockInfo[] receivedAndDeletedBlocks)
+ throws IOException {
+ rpcProxy.blockReceivedAndDeleted(
+ DatanodeRegistrationWritable.convert(registration), poolId,
+ ReceivedDeletedBlockInfoWritable.convert(receivedAndDeletedBlocks));
+ }
+
+ @Override
+ public void errorReport(DatanodeRegistration registration, int errorCode,
+ String msg) throws IOException {
+ rpcProxy.errorReport(DatanodeRegistrationWritable.convert(registration),
+ errorCode, msg);
+ }
+
+ @Override
+ public NamespaceInfo versionRequest() throws IOException {
+ return rpcProxy.versionRequest().convert();
+ }
+
+ @Override
+ public UpgradeCommand processUpgradeCommand(UpgradeCommand cmd)
+ throws IOException {
+ return rpcProxy.processUpgradeCommand(UpgradeCommandWritable.convert(cmd))
+ .convert();
+ }
+
+ @Override
+ public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+ rpcProxy.reportBadBlocks(LocatedBlockWritable.convertLocatedBlock(blocks));
+ }
+
+ @Override
+ public void commitBlockSynchronization(ExtendedBlock block,
+ long newgenerationstamp, long newlength, boolean closeFile,
+ boolean deleteblock, DatanodeID[] newtargets) throws IOException {
+ rpcProxy.commitBlockSynchronization(
+ ExtendedBlockWritable.convertExtendedBlock(block), newgenerationstamp,
+ newlength, closeFile, deleteblock,
+ DatanodeIDWritable.convertDatanodeID(newtargets));
+ }
+}
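
And on the datanode side, the translator is used as an ordinary DatanodeProtocol proxy; a hedged usage sketch (address and configuration are caller-supplied):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
    import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeProtocolTranslatorR23;

    public class WireClientSketch {
      static NamespaceInfo fetchNamespaceInfo(InetSocketAddress nnAddr, Configuration conf)
          throws IOException {
        DatanodeProtocolTranslatorR23 proxy =
            new DatanodeProtocolTranslatorR23(nnAddr, conf);
        try {
          return proxy.versionRequest(); // plain DatanodeProtocol call over the R23 wire types
        } finally {
          proxy.close();
        }
      }
    }
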
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java
new file mode 100644
index 00000000000..e2bc2d82a16
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeRegistrationWritable.java
@@ -0,0 +1,113 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeIDWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExportedBlockKeysWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.StorageInfoWritable;
+import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
+import org.apache.hadoop.hdfs.server.common.StorageInfo;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * DatanodeRegistration class contains all information the name-node needs
+ * to identify and verify a data-node when it contacts the name-node.
+ * This information is sent by the data-node with each communication request.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class DatanodeRegistrationWritable implements Writable {
+ static { // register a ctor
+ WritableFactories.setFactory
+ (DatanodeRegistrationWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() { return new DatanodeRegistrationWritable(); }
+ });
+ }
+
+ private DatanodeIDWritable datanodeId;
+ private StorageInfoWritable storageInfo;
+ private ExportedBlockKeysWritable exportedKeys;
+
+ /**
+ * Default constructor.
+ */
+ public DatanodeRegistrationWritable() {
+ this("", new StorageInfo(), new ExportedBlockKeys());
+ }
+
+ /**
+ * Create DatanodeRegistration
+ */
+ public DatanodeRegistrationWritable(String nodeName, StorageInfo info,
+ ExportedBlockKeys keys) {
+ this.datanodeId = new DatanodeIDWritable(nodeName);
+ this.storageInfo = StorageInfoWritable.convert(info);
+ this.exportedKeys = ExportedBlockKeysWritable.convert(keys);
+ }
+
+ /////////////////////////////////////////////////
+ // Writable
+ /////////////////////////////////////////////////
+ /** {@inheritDoc} */
+ public void write(DataOutput out) throws IOException {
+ datanodeId.write(out);
+
+ //TODO: move it to DatanodeID once HADOOP-2797 has been committed
+ out.writeShort(datanodeId.ipcPort);
+
+ storageInfo.write(out);
+ exportedKeys.write(out);
+ }
+
+ /** {@inheritDoc} */
+ public void readFields(DataInput in) throws IOException {
+ datanodeId.readFields(in);
+
+ //TODO: move it to DatanodeID once HADOOP-2797 has been committed
+ datanodeId.ipcPort = in.readShort() & 0x0000ffff;
+
+ storageInfo.readFields(in);
+ exportedKeys.readFields(in);
+ }
+
+ public DatanodeRegistration convert() {
+ DatanodeRegistration dnReg = new DatanodeRegistration(datanodeId.name,
+ storageInfo.convert(), exportedKeys.convert());
+ dnReg.setIpcPort(datanodeId.ipcPort);
+ return dnReg;
+ }
+
+ public static DatanodeRegistrationWritable convert(DatanodeRegistration dnReg) {
+ if (dnReg == null) return null;
+ DatanodeRegistrationWritable ret = new DatanodeRegistrationWritable(
+ dnReg.getName(), dnReg.storageInfo, dnReg.exportedKeys);
+ ret.datanodeId.ipcPort = dnReg.ipcPort;
+ return ret;
+ }
+}
\ No newline at end of file
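
The Writable pattern above (a registered factory plus write()/readFields()) is what lets these R23 types travel over the old RPC layer. A minimal round-trip sketch follows; it assumes a 0.23-era Hadoop classpath, and the class name, hostname and port are placeholders invented for illustration.

  import org.apache.hadoop.hdfs.security.token.block.ExportedBlockKeys;
  import org.apache.hadoop.hdfs.server.common.StorageInfo;
  import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeRegistrationWritable;
  import org.apache.hadoop.io.DataInputBuffer;
  import org.apache.hadoop.io.DataOutputBuffer;

  public class DatanodeRegistrationRoundTrip {
    public static void main(String[] args) throws Exception {
      DatanodeRegistrationWritable reg = new DatanodeRegistrationWritable(
          "datanode.example.com:50010", new StorageInfo(), new ExportedBlockKeys());

      // Serialize with write(), then rebuild a fresh instance with readFields().
      DataOutputBuffer out = new DataOutputBuffer();
      reg.write(out);
      DataInputBuffer in = new DataInputBuffer();
      in.reset(out.getData(), out.getLength());
      DatanodeRegistrationWritable copy = new DatanodeRegistrationWritable();
      copy.readFields(in);

      // convert() yields the internal DatanodeRegistration type used by the server code.
      System.out.println(copy.convert().getName());
    }
  }
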
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeWireProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeWireProtocol.java
new file mode 100644
index 00000000000..f630053bf9a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/DatanodeWireProtocol.java
@@ -0,0 +1,183 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.IOException;
+
+import org.apache.avro.reflect.Nullable;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeIDWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.LocatedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.NamespaceInfoWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+
+/**********************************************************************
+ * Protocol that a DFS datanode uses to communicate with the NameNode.
+ * It's used to upload current load information and block reports.
+ *
+ * The only way a NameNode can communicate with a DataNode is by
+ * returning values from these functions.
+ *
+ **********************************************************************/
+@KerberosInfo(
+ serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY,
+ clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
+@InterfaceAudience.Private
+public interface DatanodeWireProtocol extends VersionedProtocol {
+ /**
+ * The rules for changing this protocol are the same as that for
+ * {@link ClientNamenodeWireProtocol} - see that java file for details.
+ */
+ public static final long versionID = 28L;
+
+ // error code
+ final static int NOTIFY = 0;
+ final static int DISK_ERROR = 1; // there are still valid volumes on DN
+ final static int INVALID_BLOCK = 2;
+ final static int FATAL_DISK_ERROR = 3; // no valid volumes left on DN
+
+ /**
+ * Determines actions that a data node should perform
+ * when receiving a datanode command.
+ */
+ final static int DNA_UNKNOWN = 0; // unknown action
+ final static int DNA_TRANSFER = 1; // transfer blocks to another datanode
+ final static int DNA_INVALIDATE = 2; // invalidate blocks
+ final static int DNA_SHUTDOWN = 3; // shutdown node
+ final static int DNA_REGISTER = 4; // re-register
+ final static int DNA_FINALIZE = 5; // finalize previous upgrade
+ final static int DNA_RECOVERBLOCK = 6; // request a block recovery
+ final static int DNA_ACCESSKEYUPDATE = 7; // update access key
+ final static int DNA_BALANCERBANDWIDTHUPDATE = 8; // update balancer bandwidth
+
+ /**
+ * Register Datanode.
+ * @return updated {@link DatanodeRegistrationWritable}, which contains
+ * new storageID if the datanode did not have one and
+ * registration ID for further communication.
+ */
+ public DatanodeRegistrationWritable registerDatanode(
+ DatanodeRegistrationWritable registration) throws IOException;
+ /**
+ * sendHeartbeat() tells the NameNode that the DataNode is still
+ * alive and well. Includes some status info, too.
+ * It also gives the NameNode a chance to return
+ * an array of "DatanodeCommand" objects.
+ * A DatanodeCommand tells the DataNode to invalidate local block(s),
+ * or to copy them to other DataNodes, etc.
+ * @param registration datanode registration information
+ * @param capacity total storage capacity available at the datanode
+ * @param dfsUsed storage used by HDFS
+ * @param remaining remaining storage available for HDFS
+ * @param blockPoolUsed storage used by the block pool
+ * @param xmitsInProgress number of transfers from this datanode to others
+ * @param xceiverCount number of active transceiver threads
+ * @param failedVolumes number of failed volumes
+ * @throws IOException on error
+ */
+ @Nullable
+ public DatanodeCommandWritable[] sendHeartbeat(
+ DatanodeRegistrationWritable registration, long capacity, long dfsUsed,
+ long remaining, long blockPoolUsed, int xmitsInProgress,
+ int xceiverCount, int failedVolumes) throws IOException;
+
+ /**
+ * blockReport() tells the NameNode about all the locally-stored blocks.
+ * The NameNode returns an array of Blocks that have become obsolete
+ * and should be deleted. This function is meant to upload *all*
+ * the locally-stored blocks. It's invoked upon startup and then
+ * infrequently afterwards.
+ * @param registration
+ * @param poolId - the block pool ID for the blocks
+ * @param blocks - the block list as an array of longs.
+ * Each block is represented as 2 longs.
+ * This is done instead of Block[] to reduce memory used by block reports.
+ *
+ * @return - the next command for DN to process.
+ * @throws IOException
+ */
+ public DatanodeCommandWritable blockReport(
+ DatanodeRegistrationWritable registration, String poolId, long[] blocks)
+ throws IOException;
+
+ /**
+ * blockReceivedAndDeleted() allows the DataNode to tell the NameNode about
+ * recently-received and -deleted block data.
+ *
+ * For the case of received blocks, a hint is provided for the preferred
+ * replica to be deleted when there are excess replicas.
+ * For example, whenever client code
+ * writes a new Block here, or another DataNode copies a Block to
+ * this DataNode, it will call blockReceived().
+ */
+ public void blockReceivedAndDeleted(
+ DatanodeRegistrationWritable registration, String poolId,
+ ReceivedDeletedBlockInfoWritable[] receivedAndDeletedBlocks)
+ throws IOException;
+
+ /**
+ * errorReport() tells the NameNode about something that has gone
+ * awry. Useful for debugging.
+ */
+ public void errorReport(DatanodeRegistrationWritable registration,
+ int errorCode, String msg) throws IOException;
+
+ public NamespaceInfoWritable versionRequest() throws IOException;
+
+ /**
+ * This is a very general way to send a command to the name-node during the
+ * distributed upgrade process.
+ *
+ * The generality is needed because the variety of upgrade commands is unpredictable.
+ * The reply from the name-node is also received in the form of an upgrade
+ * command.
+ *
+ * @return a reply in the form of an upgrade command
+ */
+ UpgradeCommandWritable processUpgradeCommand(UpgradeCommandWritable comm)
+ throws IOException;
+
+ /**
+ * Same as {@link ClientProtocol#reportBadBlocks(LocatedBlock[])}.
+ */
+ public void reportBadBlocks(LocatedBlockWritable[] blocks) throws IOException;
+
+ /**
+ * Commit block synchronization in lease recovery
+ */
+ public void commitBlockSynchronization(ExtendedBlockWritable block,
+ long newgenerationstamp, long newlength, boolean closeFile,
+ boolean deleteblock, DatanodeIDWritable[] newtargets) throws IOException;
+
+ /**
+ * This method is defined to get the protocol signature using
+ * the R23 protocol - hence we have added the suffix of 2 to the method name
+ * to avoid conflict.
+ */
+ public ProtocolSignatureWritable getProtocolSignature2(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException;
+}
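
A caller would normally reach this interface through an RPC proxy rather than implementing it directly. The sketch below mirrors the RPC.getProxy(...) call used by the InterDatanode translator later in this patch; the factory class name, the nnAddr parameter and the assumption that DataNode code is wired exactly this way are for illustration only.

  import java.io.IOException;
  import java.net.InetSocketAddress;

  import javax.net.SocketFactory;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.protocolR23Compatible.DatanodeWireProtocol;
  import org.apache.hadoop.ipc.RPC;
  import org.apache.hadoop.net.NetUtils;
  import org.apache.hadoop.security.UserGroupInformation;

  public class DatanodeWireProtocolProxySketch {
    public static DatanodeWireProtocol create(InetSocketAddress nnAddr,
        Configuration conf, int socketTimeout) throws IOException {
      UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
      SocketFactory factory =
          NetUtils.getSocketFactory(conf, DatanodeWireProtocol.class);
      // Same RPC.getProxy overload the InterDatanode translator in this patch uses.
      return RPC.getProxy(DatanodeWireProtocol.class,
          DatanodeWireProtocol.versionID, nnAddr, ugi, conf, factory,
          socketTimeout);
    }
  }
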
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/FinalizeCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/FinalizeCommandWritable.java
new file mode 100644
index 00000000000..2de91ad9aea
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/FinalizeCommandWritable.java
@@ -0,0 +1,88 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.FinalizeCommand;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+import org.apache.hadoop.io.WritableUtils;
+
+/**
+ * A FinalizeCommand is an instruction from the namenode to a datanode to
+ * finalize the previous upgrade.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class FinalizeCommandWritable extends DatanodeCommandWritable {
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(FinalizeCommandWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new FinalizeCommandWritable();
+ }
+ });
+ }
+
+ String blockPoolId;
+
+ private FinalizeCommandWritable() {
+ this(null);
+ }
+
+ public FinalizeCommandWritable(String bpid) {
+ super(DatanodeWireProtocol.DNA_FINALIZE);
+ blockPoolId = bpid;
+ }
+
+ public String getBlockPoolId() {
+ return blockPoolId;
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ blockPoolId = WritableUtils.readString(in);
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ WritableUtils.writeString(out, blockPoolId);
+ }
+
+ @Override
+ public DatanodeCommand convert() {
+ return new FinalizeCommand(blockPoolId);
+ }
+
+ public static FinalizeCommandWritable convert(FinalizeCommand cmd) {
+ if (cmd == null) {
+ return null;
+ }
+ return new FinalizeCommandWritable(cmd.getBlockPoolId());
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeProtocolServerSideTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeProtocolServerSideTranslatorR23.java
new file mode 100644
index 00000000000..9b6c63b396c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeProtocolServerSideTranslatorR23.java
@@ -0,0 +1,116 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+
+/**
+ * This class is used on the server side. Calls come across the wire for the
+ * protocol family of Release 23 onwards. This class translates the R23 data
+ * types to the internal data types used inside the DN as specified in the
+ * generic InterDatanodeProtocol.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class InterDatanodeProtocolServerSideTranslatorR23 implements
+ InterDatanodeWireProtocol {
+ final private InterDatanodeProtocol server;
+
+ /**
+ *
+ * @param server - datanode server
+ * @throws IOException
+ */
+ public InterDatanodeProtocolServerSideTranslatorR23(
+ InterDatanodeProtocol server) throws IOException {
+ this.server = server;
+ }
+
+ /**
+ * The client side will redirect getProtocolSignature to
+ * getProtocolSignature2.
+ *
+ * However, the RPC layer below on the server side will call getProtocolVersion
+ * and possibly in the future getProtocolSignature. Hence we still implement
+ * it even though the end client's call will never reach here.
+ */
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ /**
+ * Don't forward this to the server. The protocol version and signature are
+ * those of {@link InterDatanodeProtocol}
+ */
+ if (!protocol.equals(RPC.getProtocolName(InterDatanodeWireProtocol.class))) {
+ throw new IOException("Datanode Serverside implements "
+ + InterDatanodeWireProtocol.class
+ + ". The following requested protocol is unknown: " + protocol);
+ }
+
+ return ProtocolSignature.getProtocolSignature(clientMethodsHash,
+ InterDatanodeWireProtocol.versionID, InterDatanodeWireProtocol.class);
+ }
+
+ @Override
+ public ProtocolSignatureWritable getProtocolSignature2(String protocol,
+ long clientVersion, int clientMethodsHash) throws IOException {
+ /**
+ * Don't forward this to the server. The protocol version and signature are
+ * those of {@link InterDatanodeProtocol}
+ */
+ return ProtocolSignatureWritable.convert(this.getProtocolSignature(
+ protocol, clientVersion, clientMethodsHash));
+
+ }
+
+ @Override
+ public long getProtocolVersion(String protocol, long clientVersion)
+ throws IOException {
+ if (protocol.equals(RPC.getProtocolName(InterDatanodeWireProtocol.class))) {
+ return InterDatanodeWireProtocol.versionID;
+ }
+ throw new IOException("Datanode Serverside implements "
+ + InterDatanodeWireProtocol.class
+ + ". The following requested protocol is unknown: " + protocol);
+ }
+
+ @Override
+ public ReplicaRecoveryInfoWritable initReplicaRecovery(
+ RecoveringBlockWritable rBlock) throws IOException {
+ return ReplicaRecoveryInfoWritable.convert(server
+ .initReplicaRecovery(rBlock.convert()));
+ }
+
+ @Override
+ public ExtendedBlockWritable updateReplicaUnderRecovery(
+ ExtendedBlockWritable oldBlock, long recoveryId, long newLength)
+ throws IOException {
+ ExtendedBlock b = ExtendedBlockWritable.convertExtendedBlock(oldBlock);
+ return ExtendedBlockWritable.convertExtendedBlock(server
+ .updateReplicaUnderRecovery(b, recoveryId, newLength));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeProtocolTranslatorR23.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeProtocolTranslatorR23.java
new file mode 100644
index 00000000000..730ec1568d5
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeProtocolTranslatorR23.java
@@ -0,0 +1,96 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.IOException;
+import java.net.InetSocketAddress;
+
+import javax.net.SocketFactory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.security.UserGroupInformation;
+
+/**
+ * This class forwards InterDatanodeProtocol calls as RPC to the DN server while
+ * translating from the parameter types used in InterDatanodeProtocol to those
+ * used in protocolR23Compatible.*.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Stable
+public class InterDatanodeProtocolTranslatorR23 implements
+ InterDatanodeProtocol {
+
+ final private InterDatanodeWireProtocol rpcProxy;
+
+ /** used for testing */
+ public InterDatanodeProtocolTranslatorR23(InetSocketAddress addr,
+ UserGroupInformation ugi, Configuration conf, SocketFactory factory,
+ int socketTimeout)
+ throws IOException {
+ rpcProxy = createInterDatanodeProtocolProxy(addr, ugi, conf, factory,
+ socketTimeout);
+ }
+
+ static InterDatanodeWireProtocol createInterDatanodeProtocolProxy(
+ InetSocketAddress addr, UserGroupInformation ugi, Configuration conf,
+ SocketFactory factory, int socketTimeout) throws IOException {
+ return RPC.getProxy(InterDatanodeWireProtocol.class,
+ InterDatanodeWireProtocol.versionID, addr, ugi, conf, factory,
+ socketTimeout);
+ }
+
+ @Override
+ public ProtocolSignature getProtocolSignature(String protocolName,
+ long clientVersion, int clientMethodHash) throws IOException {
+ return ProtocolSignatureWritable.convert(rpcProxy.getProtocolSignature2(
+ protocolName, clientVersion, clientMethodHash));
+ }
+
+ @Override
+ public long getProtocolVersion(String protocolName, long clientVersion)
+ throws IOException {
+ return rpcProxy.getProtocolVersion(protocolName, clientVersion);
+ }
+
+ @Override
+ public ReplicaRecoveryInfo initReplicaRecovery(RecoveringBlock rBlock)
+ throws IOException {
+ return rpcProxy
+ .initReplicaRecovery(RecoveringBlockWritable.convert(rBlock)).convert();
+ }
+
+ @Override
+ public ExtendedBlock updateReplicaUnderRecovery(ExtendedBlock oldBlock,
+ long recoveryId, long newLength) throws IOException {
+ ExtendedBlockWritable eb = ExtendedBlockWritable
+ .convertExtendedBlock(oldBlock);
+ ExtendedBlockWritable b = rpcProxy.updateReplicaUnderRecovery(eb,
+ recoveryId, newLength);
+ return ExtendedBlockWritable.convertExtendedBlock(b);
+ }
+}
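
A hedged usage sketch for the translator above follows; the helper class name, the target address and the one-minute timeout are placeholders, not the actual DataNode wiring.

  import java.io.IOException;
  import java.net.InetSocketAddress;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
  import org.apache.hadoop.hdfs.server.protocolR23Compatible.InterDatanodeProtocolTranslatorR23;
  import org.apache.hadoop.net.NetUtils;
  import org.apache.hadoop.security.UserGroupInformation;

  public class InterDatanodeCallSketch {
    public static InterDatanodeProtocol connect(InetSocketAddress dnAddr,
        Configuration conf) throws IOException {
      // The translator hides the R23 wire types; callers keep programming
      // against the internal InterDatanodeProtocol interface.
      return new InterDatanodeProtocolTranslatorR23(dnAddr,
          UserGroupInformation.getCurrentUser(), conf,
          NetUtils.getDefaultSocketFactory(conf), 60 * 1000);
    }
  }
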
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeWireProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeWireProtocol.java
new file mode 100644
index 00000000000..40ad845f9bc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/InterDatanodeWireProtocol.java
@@ -0,0 +1,73 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ProtocolSignatureWritable;
+import org.apache.hadoop.ipc.VersionedProtocol;
+import org.apache.hadoop.security.KerberosInfo;
+
+/** An inter-datanode protocol for updating generation stamp
+ */
+@KerberosInfo(
+ serverPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY,
+ clientPrincipal = DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY)
+@InterfaceAudience.Private
+public interface InterDatanodeWireProtocol extends VersionedProtocol {
+ public static final Log LOG =
+ LogFactory.getLog(InterDatanodeWireProtocol.class);
+ /**
+ * The rules for changing this protocol are the same as that for
+ * {@link ClientNamenodeWireProtocol} - see that java file for details.
+ * 6: Add block pool ID to Block
+ */
+ public static final long versionID = 6L;
+
+ /**
+ * Initialize a replica recovery.
+ *
+ * @return the actual state of the replica on this data-node, or
+ * null if the data-node does not have the replica.
+ */
+ ReplicaRecoveryInfoWritable initReplicaRecovery(RecoveringBlockWritable rBlock)
+ throws IOException;
+
+ /**
+ * Update replica with the new generation stamp and length.
+ */
+ ExtendedBlockWritable updateReplicaUnderRecovery(
+ ExtendedBlockWritable oldBlock, long recoveryId, long newLength)
+ throws IOException;
+
+ /**
+ * This method is defined to get the protocol signature using
+ * the R23 protocol - hence we have added the suffix of 2 to the method name
+ * to avoid conflict.
+ */
+ public ProtocolSignatureWritable getProtocolSignature2(
+ String protocol, long clientVersion, int clientMethodsHash)
+ throws IOException;
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/KeyUpdateCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/KeyUpdateCommandWritable.java
new file mode 100644
index 00000000000..2de6b21f8e7
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/KeyUpdateCommandWritable.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExportedBlockKeysWritable;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.KeyUpdateCommand;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class KeyUpdateCommandWritable extends DatanodeCommandWritable {
+ private ExportedBlockKeysWritable keys;
+
+ KeyUpdateCommandWritable() {
+ this(new ExportedBlockKeysWritable());
+ }
+
+ public KeyUpdateCommandWritable(ExportedBlockKeysWritable keys) {
+ super(DatanodeWireProtocol.DNA_ACCESSKEYUPDATE);
+ this.keys = keys;
+ }
+
+ public ExportedBlockKeysWritable getExportedKeys() {
+ return this.keys;
+ }
+
+ // ///////////////////////////////////////////////
+ // Writable
+ // ///////////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(KeyUpdateCommandWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new KeyUpdateCommandWritable();
+ }
+ });
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ keys.write(out);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ keys.readFields(in);
+ }
+
+ @Override
+ public DatanodeCommand convert() {
+ return new KeyUpdateCommand(keys.convert());
+ }
+
+ public static KeyUpdateCommandWritable convert(KeyUpdateCommand cmd) {
+ if (cmd == null) {
+ return null;
+ }
+ return new KeyUpdateCommandWritable(ExportedBlockKeysWritable.convert(cmd
+ .getExportedKeys()));
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ReceivedDeletedBlockInfoWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ReceivedDeletedBlockInfoWritable.java
new file mode 100644
index 00000000000..5d37890c7fa
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ReceivedDeletedBlockInfoWritable.java
@@ -0,0 +1,95 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.hdfs.protocolR23Compatible.BlockWritable;
+import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * A data structure to store Block and delHints together, used to send
+ * received/deleted ACKs.
+ */
+public class ReceivedDeletedBlockInfoWritable implements Writable {
+ BlockWritable block;
+ String delHints;
+
+ public final static String TODELETE_HINT = "-";
+
+ public ReceivedDeletedBlockInfoWritable() {
+ }
+
+ public ReceivedDeletedBlockInfoWritable(BlockWritable blk, String delHints) {
+ this.block = blk;
+ this.delHints = delHints;
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ this.block.write(out);
+ Text.writeString(out, this.delHints);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ this.block = new BlockWritable();
+ this.block.readFields(in);
+ this.delHints = Text.readString(in);
+ }
+
+ public String toString() {
+ return block.toString() + ", delHint: " + delHints;
+ }
+
+ public static ReceivedDeletedBlockInfo[] convert(
+ ReceivedDeletedBlockInfoWritable[] rdBlocks) {
+ ReceivedDeletedBlockInfo[] ret =
+ new ReceivedDeletedBlockInfo[rdBlocks.length];
+ for (int i = 0; i < rdBlocks.length; i++) {
+ ret[i] = rdBlocks[i].convert();
+ }
+ return ret;
+ }
+
+ public static ReceivedDeletedBlockInfoWritable[] convert(
+ ReceivedDeletedBlockInfo[] blocks) {
+ ReceivedDeletedBlockInfoWritable[] ret =
+ new ReceivedDeletedBlockInfoWritable[blocks.length];
+ for (int i = 0; i < blocks.length; i++) {
+ ret[i] = convert(blocks[i]);
+ }
+ return ret;
+ }
+
+ public ReceivedDeletedBlockInfo convert() {
+ return new ReceivedDeletedBlockInfo(block.convert(), delHints);
+ }
+
+ public static ReceivedDeletedBlockInfoWritable convert(
+ ReceivedDeletedBlockInfo b) {
+ if (b == null) return null;
+ return new ReceivedDeletedBlockInfoWritable(BlockWritable.convert(b
+ .getBlock()), b.getDelHints());
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/RecoveringBlockWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/RecoveringBlockWritable.java
new file mode 100644
index 00000000000..0324f265c16
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/RecoveringBlockWritable.java
@@ -0,0 +1,104 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocolR23Compatible.DatanodeInfoWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable;
+import org.apache.hadoop.hdfs.protocolR23Compatible.LocatedBlockWritable;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * This is a block with locations from which it should be recovered and the new
+ * generation stamp, which the block will have after successful recovery.
+ *
+ * The new generation stamp of the block also plays the role of the recovery id.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RecoveringBlockWritable implements Writable {
+ private long newGenerationStamp;
+ private LocatedBlockWritable locatedBlock;
+
+ /**
+ * Create empty RecoveringBlock.
+ */
+ public RecoveringBlockWritable() {
+ locatedBlock = new LocatedBlockWritable();
+ newGenerationStamp = -1L;
+ }
+
+ /**
+ * Create RecoveringBlock.
+ */
+ public RecoveringBlockWritable(ExtendedBlockWritable b,
+ DatanodeInfoWritable[] locs, long newGS) {
+ locatedBlock = new LocatedBlockWritable(b, locs, -1, false);
+ this.newGenerationStamp = newGS;
+ }
+
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(RecoveringBlockWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new RecoveringBlockWritable();
+ }
+ });
+ }
+
+ public void write(DataOutput out) throws IOException {
+ locatedBlock.write(out);
+ out.writeLong(newGenerationStamp);
+ }
+
+ public void readFields(DataInput in) throws IOException {
+ locatedBlock = new LocatedBlockWritable();
+ locatedBlock.readFields(in);
+ newGenerationStamp = in.readLong();
+ }
+
+ public RecoveringBlock convert() {
+ ExtendedBlockWritable eb = locatedBlock.getBlock();
+ DatanodeInfoWritable[] dnInfo = locatedBlock.getLocations();
+ return new RecoveringBlock(ExtendedBlockWritable.convertExtendedBlock(eb),
+ DatanodeInfoWritable.convertDatanodeInfo(dnInfo), newGenerationStamp);
+ }
+
+ public static RecoveringBlockWritable convert(RecoveringBlock rBlock) {
+ if (rBlock == null) {
+ return null;
+ }
+ ExtendedBlockWritable eb = ExtendedBlockWritable
+ .convertExtendedBlock(rBlock.getBlock());
+ DatanodeInfoWritable[] dnInfo = DatanodeInfoWritable
+ .convertDatanodeInfo(rBlock.getLocations());
+ return new RecoveringBlockWritable(eb, dnInfo,
+ rBlock.getNewGenerationStamp());
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/RegisterCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/RegisterCommandWritable.java
new file mode 100644
index 00000000000..ae828d8f183
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/RegisterCommandWritable.java
@@ -0,0 +1,69 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * A RegisterCommand is an instruction to a datanode to register with the
+ * namenode.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class RegisterCommandWritable extends DatanodeCommandWritable {
+ public static final RegisterCommandWritable REGISTER =
+ new RegisterCommandWritable();
+
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(RegisterCommandWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new RegisterCommandWritable();
+ }
+ });
+ }
+
+ public RegisterCommandWritable() {
+ super(DatanodeWireProtocol.DNA_REGISTER);
+ }
+
+ @Override
+ public void readFields(DataInput in) { /* Nothing to read */
+ }
+
+ @Override
+ public void write(DataOutput out) { /* Nothing to write */
+ }
+
+ @Override
+ public DatanodeCommand convert() {
+ return RegisterCommand.REGISTER;
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ReplicaRecoveryInfoWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ReplicaRecoveryInfoWritable.java
new file mode 100644
index 00000000000..e6853600e42
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ReplicaRecoveryInfoWritable.java
@@ -0,0 +1,87 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.protocolR23Compatible.BlockWritable;
+import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.ReplicaState;
+import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * Replica recovery information.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class ReplicaRecoveryInfoWritable implements Writable {
+ private int originalState;
+ private BlockWritable block;
+
+ public ReplicaRecoveryInfoWritable() {
+ }
+
+ public ReplicaRecoveryInfoWritable(long blockId, long diskLen, long gs,
+ ReplicaState rState) {
+ block = new BlockWritable(blockId, diskLen, gs);
+ originalState = rState.getValue();
+ }
+
+ // /////////////////////////////////////////
+ // Writable
+ // /////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory(ReplicaRecoveryInfoWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() {
+ return new ReplicaRecoveryInfoWritable();
+ }
+ });
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ block = new BlockWritable();
+ block.readFields(in);
+ originalState = in.readInt();
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ block.write(out);
+ out.writeInt(originalState);
+ }
+
+ public static ReplicaRecoveryInfoWritable convert(ReplicaRecoveryInfo rrInfo) {
+ return new ReplicaRecoveryInfoWritable(rrInfo.getBlockId(),
+ rrInfo.getNumBytes(), rrInfo.getGenerationStamp(),
+ rrInfo.getOriginalReplicaState());
+ }
+
+ public ReplicaRecoveryInfo convert() {
+ return new ReplicaRecoveryInfo(block.getBlockId(), block.getNumBytes(),
+ block.getGenerationStamp(), ReplicaState.getState(originalState));
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ServerCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ServerCommandWritable.java
new file mode 100644
index 00000000000..e4dcfc10c9e
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/ServerCommandWritable.java
@@ -0,0 +1,75 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.Writable;
+
+/**
+ * Base class for a server command.
+ * Issued by the name-node to notify other servers what should be done.
+ * Commands are defined by actions defined in respective protocols.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public abstract class ServerCommandWritable implements Writable {
+ private int action;
+
+ /**
+ * Unknown server command constructor.
+ * Creates a command with action 0.
+ */
+ public ServerCommandWritable() {
+ this(0);
+ }
+
+ /**
+ * Create a command for the specified action.
+ * Actions are protocol specific.
+ * @param action
+ */
+ public ServerCommandWritable(int action) {
+ this.action = action;
+ }
+
+ /**
+ * Get server command action.
+ * @return action code.
+ */
+ public int getAction() {
+ return this.action;
+ }
+
+ ///////////////////////////////////////////
+ // Writable
+ ///////////////////////////////////////////
+ @Override
+ public void write(DataOutput out) throws IOException {
+ out.writeInt(this.action);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ this.action = in.readInt();
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/UpgradeCommandWritable.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/UpgradeCommandWritable.java
new file mode 100644
index 00000000000..ed3a70f0773
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocolR23Compatible/UpgradeCommandWritable.java
@@ -0,0 +1,106 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.protocolR23Compatible;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.io.WritableFactories;
+import org.apache.hadoop.io.WritableFactory;
+
+/**
+ * This is a generic distributed upgrade command.
+ *
+ * During an upgrade, cluster components send upgrade commands to each other
+ * in order to obtain or share information.
+ * Each upgrade is expected to define its own specific upgrade command by
+ * deriving it from this class.
+ * The upgrade command carries the version of the upgrade, which is verified
+ * on the receiving side, and the current status of the upgrade.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public class UpgradeCommandWritable extends DatanodeCommandWritable {
+ final static int UC_ACTION_UNKNOWN = DatanodeWireProtocol.DNA_UNKNOWN;
+ public final static int UC_ACTION_REPORT_STATUS = 100; // report upgrade status
+ public final static int UC_ACTION_START_UPGRADE = 101; // start upgrade
+
+ private int version;
+ private short upgradeStatus;
+
+ public UpgradeCommandWritable() {
+ super(UC_ACTION_UNKNOWN);
+ this.version = 0;
+ this.upgradeStatus = 0;
+ }
+
+ public UpgradeCommandWritable(int action, int version, short status) {
+ super(action);
+ this.version = version;
+ this.upgradeStatus = status;
+ }
+
+ public int getVersion() {
+ return this.version;
+ }
+
+ public short getCurrentStatus() {
+ return this.upgradeStatus;
+ }
+
+ /////////////////////////////////////////////////
+ // Writable
+ /////////////////////////////////////////////////
+ static { // register a ctor
+ WritableFactories.setFactory
+ (UpgradeCommandWritable.class,
+ new WritableFactory() {
+ public Writable newInstance() { return new UpgradeCommandWritable(); }
+ });
+ }
+
+ @Override
+ public void write(DataOutput out) throws IOException {
+ super.write(out);
+ out.writeInt(this.version);
+ out.writeShort(this.upgradeStatus);
+ }
+
+ @Override
+ public void readFields(DataInput in) throws IOException {
+ super.readFields(in);
+ this.version = in.readInt();
+ this.upgradeStatus = in.readShort();
+ }
+
+ @Override
+ public UpgradeCommand convert() {
+ return new UpgradeCommand(getAction(), version, upgradeStatus);
+ }
+
+ public static UpgradeCommandWritable convert(UpgradeCommand cmd) {
+ if (cmd == null) return null;
+ return new UpgradeCommandWritable(cmd.getAction(), cmd.getVersion(),
+ cmd.getCurrentStatus());
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
index b4f4e7c4d1f..2144203965c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
@@ -1146,10 +1146,9 @@ public class DFSAdmin extends FsShell {
conf.get(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, ""));
// Create the client
- ClientDatanodeProtocol dnProtocol = RPC.getProxy(
- ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
- datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf,
- ClientDatanodeProtocol.class));
+ ClientDatanodeProtocol dnProtocol =
+ DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
+ NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
return dnProtocol;
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
index 1e853933433..eb8af25d26f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DelegationTokenFetcher.java
@@ -39,14 +39,17 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
import org.apache.hadoop.hdfs.server.namenode.CancelDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.GetDelegationTokenServlet;
import org.apache.hadoop.hdfs.server.namenode.RenewDelegationTokenServlet;
import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
@@ -149,34 +152,31 @@ public class DelegationTokenFetcher {
DataInputStream in = new DataInputStream(
new ByteArrayInputStream(token.getIdentifier()));
id.readFields(in);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Token (" + id + ") for " + token.getService());
+ System.out.println("Token (" + id + ") for " +
+ token.getService());
+ }
+ } else if (cancel) {
+ for(Token> token: readTokens(tokenFile, conf)) {
+ if (token.isManaged()) {
+ token.cancel(conf);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Cancelled token for " + token.getService());
+ }
}
}
- return null;
- }
-
- if (webUrl != null) {
- if (renew) {
- long result;
- for (Token> token : readTokens(tokenFile, conf)) {
- result = renewDelegationToken(webUrl,
- (Token) token);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Renewed token via " + webUrl + " for "
- + token.getService() + " until: " + new Date(result));
+ } else if (renew) {
+ for (Token> token : readTokens(tokenFile, conf)) {
+ if (token.isManaged()) {
+ long result = token.renew(conf);
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Renewed token for " + token.getService()
+ + " until: " + new Date(result));
}
}
- } else if (cancel) {
- for (Token> token : readTokens(tokenFile, conf)) {
- cancelDelegationToken(webUrl,
- (Token) token);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Cancelled token via " + webUrl + " for "
- + token.getService());
- }
- }
- } else {
+ }
+ } else {
+ // otherwise we are fetching
+ if (webUrl != null) {
Credentials creds = getDTfromRemote(webUrl, renewer);
creds.writeTokenStorageFile(tokenFile, conf);
for (Token<?> token : creds.getAllTokens()) {
@@ -185,29 +185,8 @@ public class DelegationTokenFetcher {
+ token.getService() + " into " + tokenFile);
}
}
- }
- } else {
- FileSystem fs = FileSystem.get(conf);
- if (cancel) {
- for (Token<?> token : readTokens(tokenFile, conf)) {
- ((DistributedFileSystem) fs)
- .cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Cancelled token for "
- + token.getService());
- }
- }
- } else if (renew) {
- long result;
- for (Token<?> token : readTokens(tokenFile, conf)) {
- result = ((DistributedFileSystem) fs)
- .renewDelegationToken((Token<DelegationTokenIdentifier>) token);
- if(LOG.isDebugEnabled()) {
- LOG.debug("Renewed token for " + token.getService()
- + " until: " + new Date(result));
- }
- }
} else {
+ FileSystem fs = FileSystem.get(conf);
Token<?> token = fs.getDelegationToken(renewer);
Credentials cred = new Credentials();
cred.addToken(token.getService(), token);
@@ -230,8 +209,9 @@ public class DelegationTokenFetcher {
try {
StringBuffer url = new StringBuffer();
if (renewer != null) {
- url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC).append("?").
- append(GetDelegationTokenServlet.RENEWER).append("=").append(renewer);
+ url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC)
+ .append("?").append(GetDelegationTokenServlet.RENEWER).append("=")
+ .append(renewer);
} else {
url.append(nnAddr).append(GetDelegationTokenServlet.PATH_SPEC);
}
@@ -248,6 +228,12 @@ public class DelegationTokenFetcher {
Credentials ts = new Credentials();
dis = new DataInputStream(in);
ts.readFields(dis);
+ for(Token<?> token: ts.getAllTokens()) {
+ token.setKind(HftpFileSystem.TOKEN_KIND);
+ token.setService(new Text(SecurityUtil.buildDTServiceName
+ (remoteURL.toURI(),
+ DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT)));
+ }
return ts;
} catch (Exception e) {
throw new IOException("Unable to obtain remote token", e);
@@ -295,7 +281,8 @@ public class DelegationTokenFetcher {
IOUtils.cleanup(LOG, in);
if(e!=null) {
- LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+ LOG.info("rethrowing exception from HTTP request: " +
+ e.getLocalizedMessage());
throw e;
}
throw ie;
@@ -383,7 +370,8 @@ public class DelegationTokenFetcher {
IOUtils.cleanup(LOG, in);
if(e!=null) {
- LOG.info("rethrowing exception from HTTP request: " + e.getLocalizedMessage());
+ LOG.info("rethrowing exception from HTTP request: " +
+ e.getLocalizedMessage());
throw e;
}
throw ie;
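
The rewritten fetcher above no longer casts to DistributedFileSystem or talks to the renew/cancel servlets directly; it goes through Token.isManaged(), Token.renew(conf) and Token.cancel(conf), which dispatch to whichever TokenRenewer is registered for the token kind. A minimal sketch of that pattern, assuming a previously written credentials file whose path here is purely illustrative:

  import java.util.Date;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.security.Credentials;
  import org.apache.hadoop.security.token.Token;

  public class TokenFileRenewSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      // Hypothetical token file produced earlier by a fetch operation.
      Credentials creds = Credentials.readTokenStorageFile(
          new Path("file:///tmp/tokens.bin"), conf);
      for (Token<?> token : creds.getAllTokens()) {
        if (token.isManaged()) {
          // Renewal is delegated to the TokenRenewer registered for this kind.
          long expires = token.renew(conf);
          System.out.println("Renewed " + token.getService()
              + " until " + new Date(expires));
        }
      }
    }
  }
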
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java
index 6e5a8dd1d9e..c4b3d26824d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/AuthFilter.java
@@ -17,12 +17,17 @@
*/
package org.apache.hadoop.hdfs.web;
-import java.util.Map;
+import java.io.IOException;
import java.util.Properties;
+import javax.servlet.FilterChain;
import javax.servlet.FilterConfig;
+import javax.servlet.ServletException;
+import javax.servlet.ServletRequest;
+import javax.servlet.ServletResponse;
+import javax.servlet.http.HttpServletRequest;
-import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;
@@ -41,30 +46,36 @@ public class AuthFilter extends AuthenticationFilter {
* The prefix is removed from the returned property names.
*
* @param prefix parameter not used.
- * @param config parameter not used.
+ * @param config parameter contains the initialization values.
* @return Hadoop-Auth configuration properties.
+ * @throws ServletException
*/
@Override
- protected Properties getConfiguration(String prefix, FilterConfig config) {
- final Configuration conf = new Configuration();
- final Properties p = new Properties();
-
- //set authentication type
+ protected Properties getConfiguration(String prefix, FilterConfig config)
+ throws ServletException {
+ final Properties p = super.getConfiguration(CONF_PREFIX, config);
+ // set authentication type
p.setProperty(AUTH_TYPE, UserGroupInformation.isSecurityEnabled()?
KerberosAuthenticationHandler.TYPE: PseudoAuthenticationHandler.TYPE);
//For Pseudo Authentication, allow anonymous.
p.setProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED, "true");
//set cookie path
p.setProperty(COOKIE_PATH, "/");
-
- //set other configurations with CONF_PREFIX
- for (Map.Entry<String, String> entry : conf) {
- final String key = entry.getKey();
- if (key.startsWith(CONF_PREFIX)) {
- //remove prefix from the key and set property
- p.setProperty(key.substring(CONF_PREFIX.length()), conf.get(key));
- }
- }
return p;
}
+
+ @Override
+ public void doFilter(ServletRequest request, ServletResponse response,
+ FilterChain filterChain) throws IOException, ServletException {
+ HttpServletRequest httpRequest = (HttpServletRequest) request;
+ String tokenString = httpRequest
+ .getParameter(DelegationParam.NAME);
+ if (tokenString != null) {
+ //Token is present in the url, therefore token will be used for
+ //authentication, bypass kerberos authentication.
+ filterChain.doFilter(httpRequest, response);
+ return;
+ }
+ super.doFilter(request, response, filterChain);
+ }
}
\ No newline at end of file
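
The new doFilter above short-circuits SPNEGO whenever the request already carries a delegation token in its query string, leaving token validation to the webhdfs resources. A rough stand-alone sketch of the same guard; the class and the kerberosFilter hook are stand-ins for this example, and only the "delegation" parameter name comes from DelegationParam.NAME:

  import java.io.IOException;
  import javax.servlet.FilterChain;
  import javax.servlet.ServletException;
  import javax.servlet.ServletRequest;
  import javax.servlet.ServletResponse;
  import javax.servlet.http.HttpServletRequest;

  public abstract class TokenBypassFilterSketch {
    public void doFilter(ServletRequest request, ServletResponse response,
        FilterChain chain) throws IOException, ServletException {
      final HttpServletRequest httpRequest = (HttpServletRequest) request;
      if (httpRequest.getParameter("delegation") != null) {
        // A token is present in the URL; skip Kerberos negotiation and let
        // the resource layer verify the token itself.
        chain.doFilter(httpRequest, response);
        return;
      }
      kerberosFilter(request, response, chain); // i.e. super.doFilter(...) in AuthFilter
    }

    // Stand-in for AuthenticationFilter.doFilter in this sketch.
    protected abstract void kerberosFilter(ServletRequest request,
        ServletResponse response, FilterChain chain)
        throws IOException, ServletException;
  }
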
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
index adf639c32bd..d166d63a98a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
@@ -27,6 +27,7 @@ import java.util.Map;
import java.util.TreeMap;
import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FileChecksum;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSUtil;
@@ -98,17 +99,18 @@ public class JsonUtil {
/** Convert an exception object to a Json string. */
public static String toJsonString(final Exception e) {
final Map<String, Object> m = new TreeMap<String, Object>();
- m.put("className", e.getClass().getName());
+ m.put("exception", e.getClass().getSimpleName());
m.put("message", e.getMessage());
+ m.put("javaClassName", e.getClass().getName());
return toJsonString(RemoteException.class, m);
}
/** Convert a Json map to a RemoteException. */
public static RemoteException toRemoteException(final Map<?, ?> json) {
final Map<?, ?> m = (Map<?, ?>)json.get(RemoteException.class.getSimpleName());
- final String className = (String)m.get("className");
final String message = (String)m.get("message");
- return new RemoteException(className, message);
+ final String javaClassName = (String)m.get("javaClassName");
+ return new RemoteException(javaClassName, message);
}
private static String toJsonString(final Class<?> clazz, final Object value) {
@@ -133,37 +135,39 @@ public class JsonUtil {
}
/** Convert a HdfsFileStatus object to a Json string. */
- public static String toJsonString(final HdfsFileStatus status) {
+ public static String toJsonString(final HdfsFileStatus status,
+ boolean includeType) {
if (status == null) {
return null;
- } else {
- final Map<String, Object> m = new TreeMap<String, Object>();
- m.put("localName", status.getLocalName());
- m.put("isDir", status.isDir());
- m.put("isSymlink", status.isSymlink());
- if (status.isSymlink()) {
- m.put("symlink", status.getSymlink());
- }
-
- m.put("len", status.getLen());
- m.put("owner", status.getOwner());
- m.put("group", status.getGroup());
- m.put("permission", toString(status.getPermission()));
- m.put("accessTime", status.getAccessTime());
- m.put("modificationTime", status.getModificationTime());
- m.put("blockSize", status.getBlockSize());
- m.put("replication", status.getReplication());
- return toJsonString(HdfsFileStatus.class, m);
}
+ final Map<String, Object> m = new TreeMap<String, Object>();
+ m.put("localName", status.getLocalName());
+ m.put("isDir", status.isDir());
+ m.put("isSymlink", status.isSymlink());
+ if (status.isSymlink()) {
+ m.put("symlink", status.getSymlink());
+ }
+
+ m.put("len", status.getLen());
+ m.put("owner", status.getOwner());
+ m.put("group", status.getGroup());
+ m.put("permission", toString(status.getPermission()));
+ m.put("accessTime", status.getAccessTime());
+ m.put("modificationTime", status.getModificationTime());
+ m.put("blockSize", status.getBlockSize());
+ m.put("replication", status.getReplication());
+ return includeType ? toJsonString(HdfsFileStatus.class, m) :
+ JSON.toString(m);
}
/** Convert a Json map to a HdfsFileStatus object. */
- public static HdfsFileStatus toFileStatus(final Map<?, ?> json) {
+ public static HdfsFileStatus toFileStatus(final Map<?, ?> json, boolean includesType) {
if (json == null) {
return null;
}
- final Map<?, ?> m = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName());
+ final Map<?, ?> m = includesType ?
+ (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName()) : json;
final String localName = (String) m.get("localName");
final boolean isDir = (Boolean) m.get("isDir");
final boolean isSymlink = (Boolean) m.get("isSymlink");
@@ -287,7 +291,7 @@ public class JsonUtil {
return array;
}
}
-
+
/** Convert a LocatedBlock to a Json map. */
private static Map<String, Object> toJsonMap(final LocatedBlock locatedblock
) throws IOException {
@@ -331,7 +335,7 @@ public class JsonUtil {
} else {
final Object[] a = new Object[array.size()];
for(int i = 0; i < array.size(); i++) {
- a[i] = toJsonMap(array.get(0));
+ a[i] = toJsonMap(array.get(i));
}
return a;
}
@@ -433,7 +437,7 @@ public class JsonUtil {
m.put("algorithm", checksum.getAlgorithmName());
m.put("length", checksum.getLength());
m.put("bytes", StringUtils.byteToHexString(checksum.getBytes()));
- return toJsonString(MD5MD5CRC32FileChecksum.class, m);
+ return toJsonString(FileChecksum.class, m);
}
/** Convert a Json map to a MD5MD5CRC32FileChecksum. */
@@ -443,8 +447,7 @@ public class JsonUtil {
return null;
}
- final Map<?, ?> m = (Map<?, ?>)json.get(
- MD5MD5CRC32FileChecksum.class.getSimpleName());
+ final Map<?, ?> m = (Map<?, ?>)json.get(FileChecksum.class.getSimpleName());
final String algorithm = (String)m.get("algorithm");
final int length = (int)(long)(Long)m.get("length");
final byte[] bytes = StringUtils.hexStringToByte((String)m.get("bytes"));
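
The includeType/includesType flags exist because GETFILESTATUS wraps the status map under the "HdfsFileStatus" key while each LISTSTATUS entry is an unwrapped map. A hedged sketch of the two shapes, with made-up field values, using the same org.mortbay JSON parser these classes already depend on:

  import java.util.Map;
  import org.mortbay.util.ajax.JSON;

  public class FileStatusJsonSketch {
    public static void main(String[] args) {
      // Wrapped shape (GETFILESTATUS): toFileStatus(json, true) unwraps by key.
      String wrapped = "{\"HdfsFileStatus\":{\"localName\":\"f\",\"isDir\":false,"
          + "\"isSymlink\":false,\"len\":42,\"owner\":\"alice\",\"group\":\"users\","
          + "\"permission\":\"644\",\"accessTime\":0,\"modificationTime\":0,"
          + "\"blockSize\":67108864,\"replication\":3}}";
      Map<?, ?> outer = (Map<?, ?>) JSON.parse(wrapped);
      Map<?, ?> status = (Map<?, ?>) outer.get("HdfsFileStatus");
      // Unwrapped shape (a LISTSTATUS entry): toFileStatus(json, false) uses the map directly.
      System.out.println(status.get("localName") + " len=" + status.get("len"));
    }
  }
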
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
index 27d6fe166ea..b37c0ed6e02 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
@@ -24,6 +24,7 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
+import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
@@ -31,6 +32,8 @@ import java.util.Arrays;
import java.util.List;
import java.util.Map;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.ContentSummary;
@@ -38,25 +41,30 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.ParentNotDirectoryException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.ByteRangeInputStream;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
-import org.apache.hadoop.hdfs.HftpFileSystem;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenRenewer;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
+import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.hdfs.web.resources.AccessTimeParam;
import org.apache.hadoop.hdfs.web.resources.BlockSizeParam;
import org.apache.hadoop.hdfs.web.resources.BufferSizeParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
import org.apache.hadoop.hdfs.web.resources.DeleteOpParam;
-import org.apache.hadoop.hdfs.web.resources.DstPathParam;
+import org.apache.hadoop.hdfs.web.resources.DestinationParam;
import org.apache.hadoop.hdfs.web.resources.GetOpParam;
import org.apache.hadoop.hdfs.web.resources.GroupParam;
import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
@@ -76,26 +84,47 @@ import org.apache.hadoop.hdfs.web.resources.ReplicationParam;
import org.apache.hadoop.hdfs.web.resources.UserParam;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
+import org.apache.hadoop.security.SecurityUtil;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authentication.client.AuthenticatedURL;
import org.apache.hadoop.security.authentication.client.AuthenticationException;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenRenewer;
+import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenSelector;
import org.apache.hadoop.util.Progressable;
import org.mortbay.util.ajax.JSON;
/** A FileSystem for HDFS over the web. */
-public class WebHdfsFileSystem extends HftpFileSystem {
+public class WebHdfsFileSystem extends FileSystem
+ implements DelegationTokenRenewer.Renewable {
+ public static final Log LOG = LogFactory.getLog(WebHdfsFileSystem.class);
/** File System URI: {SCHEME}://namenode:port/path/to/file */
public static final String SCHEME = "webhdfs";
+ /** WebHdfs version. */
+ public static final int VERSION = 1;
/** Http URI: http://namenode:port/{PATH_PREFIX}/path/to/file */
- public static final String PATH_PREFIX = SCHEME;
+ public static final String PATH_PREFIX = "/" + SCHEME + "/v" + VERSION;
+ /** SPNEGO authenticator */
private static final KerberosUgiAuthenticator AUTH = new KerberosUgiAuthenticator();
+ /** Delegation token kind */
+ public static final Text TOKEN_KIND = new Text("WEBHDFS delegation");
+
+ private static final DelegationTokenRenewer<WebHdfsFileSystem> dtRenewer
+ = new DelegationTokenRenewer<WebHdfsFileSystem>(WebHdfsFileSystem.class);
+ static {
+ dtRenewer.start();
+ }
private final UserGroupInformation ugi;
+ private InetSocketAddress nnAddr;
+ private Token<?> delegationToken;
+ private Token<?> renewToken;
private final AuthenticatedURL.Token authToken = new AuthenticatedURL.Token();
- protected Path workingDir;
+ private Path workingDir;
{
try {
@@ -111,7 +140,47 @@ public class WebHdfsFileSystem extends HftpFileSystem {
super.initialize(uri, conf);
setConf(conf);
+ this.nnAddr = NetUtils.createSocketAddr(uri.toString());
this.workingDir = getHomeDirectory();
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+ initDelegationToken();
+ }
+ }
+
+ protected void initDelegationToken() throws IOException {
+ // look for webhdfs token, then try hdfs
+ final Text serviceName = SecurityUtil.buildTokenService(nnAddr);
+ Token<?> token = webhdfspTokenSelector.selectToken(
+ serviceName, ugi.getTokens());
+ if (token == null) {
+ token = DelegationTokenSelector.selectHdfsDelegationToken(
+ nnAddr, ugi, getConf());
+ }
+
+ //since we don't already have a token, go get one
+ boolean createdToken = false;
+ if (token == null) {
+ token = getDelegationToken(null);
+ createdToken = (token != null);
+ }
+
+ // security might be disabled
+ if (token != null) {
+ setDelegationToken(token);
+ if (createdToken) {
+ dtRenewer.addRenewAction(this);
+ LOG.debug("Created new DT for " + token.getService());
+ } else {
+ LOG.debug("Found existing DT for " + token.getService());
+ }
+ }
+ }
+
+ @Override
+ protected int getDefaultPort() {
+ return getConf().getInt(DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_KEY,
+ DFSConfigKeys.DFS_NAMENODE_HTTP_PORT_DEFAULT);
}
@Override
@@ -148,19 +217,18 @@ public class WebHdfsFileSystem extends HftpFileSystem {
return f.isAbsolute()? f: new Path(workingDir, f);
}
- @SuppressWarnings("unchecked")
- private static <T> T jsonParse(final InputStream in) throws IOException {
+ private static Map<?, ?> jsonParse(final InputStream in) throws IOException {
if (in == null) {
throw new IOException("The input stream is null.");
}
- return (T)JSON.parse(new InputStreamReader(in));
+ return (Map<?, ?>)JSON.parse(new InputStreamReader(in));
}
- private static void validateResponse(final HttpOpParam.Op op,
+ private static Map<?, ?> validateResponse(final HttpOpParam.Op op,
final HttpURLConnection conn) throws IOException {
final int code = conn.getResponseCode();
if (code != op.getExpectedHttpResponseCode()) {
- final Map<String, Object> m;
+ final Map<?, ?> m;
try {
m = jsonParse(conn.getErrorStream());
} catch(IOException e) {
@@ -169,6 +237,10 @@ public class WebHdfsFileSystem extends HftpFileSystem {
+ ", message=" + conn.getResponseMessage(), e);
}
+ if (m.get(RemoteException.class.getSimpleName()) == null) {
+ return m;
+ }
+
final RemoteException re = JsonUtil.toRemoteException(m);
throw re.unwrapRemoteException(AccessControlException.class,
DSQuotaExceededException.class,
@@ -179,34 +251,82 @@ public class WebHdfsFileSystem extends HftpFileSystem {
NSQuotaExceededException.class,
UnresolvedPathException.class);
}
+ return null;
+ }
+
+ /**
+ * Return a URL pointing to given path on the namenode.
+ *
+ * @param path to obtain the URL for
+ * @param query string to append to the path
+ * @return namenode URL referring to the given path
+ * @throws IOException on error constructing the URL
+ */
+ private URL getNamenodeURL(String path, String query) throws IOException {
+ final URL url = new URL("http", nnAddr.getHostName(),
+ nnAddr.getPort(), path + '?' + query);
+ if (LOG.isTraceEnabled()) {
+ LOG.trace("url=" + url);
+ }
+ return url;
+ }
+
+ private String addDt2Query(String query) throws IOException {
+ if (UserGroupInformation.isSecurityEnabled()) {
+ synchronized (this) {
+ if (delegationToken != null) {
+ final String encoded = delegationToken.encodeToUrlString();
+ return query + JspHelper.getDelegationTokenUrlParam(encoded);
+ } // else we are talking to an insecure cluster
+ }
+ }
+ return query;
}
URL toUrl(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
//initialize URI path and query
- final String path = "/" + PATH_PREFIX
+ final String path = PATH_PREFIX
+ (fspath == null? "/": makeQualified(fspath).toUri().getPath());
final String query = op.toQueryString()
+ '&' + new UserParam(ugi)
+ Param.toSortedString("&", parameters);
- final URL url = getNamenodeURL(path, addDelegationTokenParam(query));
+ final URL url;
+ if (op.equals(PutOpParam.Op.RENEWDELEGATIONTOKEN)
+ || op.equals(GetOpParam.Op.GETDELEGATIONTOKEN)) {
+ // Skip adding delegation token for getting or renewing delegation token,
+ // because these operations require kerberos authentication.
+ url = getNamenodeURL(path, query);
+ } else {
+ url = getNamenodeURL(path, addDt2Query(query));
+ }
if (LOG.isTraceEnabled()) {
LOG.trace("url=" + url);
}
return url;
}
+ private HttpURLConnection getHttpUrlConnection(URL url)
+ throws IOException {
+ final HttpURLConnection conn;
+ try {
+ if (ugi.hasKerberosCredentials()) {
+ conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
+ } else {
+ conn = (HttpURLConnection)url.openConnection();
+ }
+ } catch (AuthenticationException e) {
+ throw new IOException("Authentication failed, url=" + url, e);
+ }
+ return conn;
+ }
+
private HttpURLConnection httpConnect(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
final URL url = toUrl(op, fspath, parameters);
//connect and get response
- final HttpURLConnection conn;
- try {
- conn = new AuthenticatedURL(AUTH).openConnection(url, authToken);
- } catch(AuthenticationException e) {
- throw new IOException("Authentication failed, url=" + url, e);
- }
+ final HttpURLConnection conn = getHttpUrlConnection(url);
try {
conn.setRequestMethod(op.getType().toString());
conn.setDoOutput(op.getDoOutput());
@@ -216,7 +336,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
}
conn.connect();
return conn;
- } catch(IOException e) {
+ } catch (IOException e) {
conn.disconnect();
throw e;
}
@@ -229,15 +349,15 @@ public class WebHdfsFileSystem extends HftpFileSystem {
* @param op http operation
* @param fspath file system path
* @param parameters parameters for the operation
- * @return a JSON object, e.g. Object[], Map, etc.
+ * @return a JSON object, e.g. Object[], Map<?, ?>, etc.
* @throws IOException
*/
- private <T> T run(final HttpOpParam.Op op, final Path fspath,
+ private Map<?, ?> run(final HttpOpParam.Op op, final Path fspath,
final Param<?,?>... parameters) throws IOException {
final HttpURLConnection conn = httpConnect(op, fspath, parameters);
- validateResponse(op, conn);
try {
- return WebHdfsFileSystem.jsonParse(conn.getInputStream());
+ final Map<?, ?> m = validateResponse(op, conn);
+ return m != null? m: jsonParse(conn.getInputStream());
} finally {
conn.disconnect();
}
@@ -252,8 +372,8 @@ public class WebHdfsFileSystem extends HftpFileSystem {
private HdfsFileStatus getHdfsFileStatus(Path f) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETFILESTATUS;
- final Map<String, Object> json = run(op, f);
- final HdfsFileStatus status = JsonUtil.toFileStatus(json);
+ final Map<?, ?> json = run(op, f);
+ final HdfsFileStatus status = JsonUtil.toFileStatus(json, true);
if (status == null) {
throw new FileNotFoundException("File does not exist: " + f);
}
@@ -278,7 +398,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
public boolean mkdirs(Path f, FsPermission permission) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.MKDIRS;
- final Map<String, Object> json = run(op, f,
+ final Map<?, ?> json = run(op, f,
new PermissionParam(applyUMask(permission)));
return (Boolean)json.get("boolean");
}
@@ -287,8 +407,8 @@ public class WebHdfsFileSystem extends HftpFileSystem {
public boolean rename(final Path src, final Path dst) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.RENAME;
- final Map<String, Object> json = run(op, src,
- new DstPathParam(makeQualified(dst).toUri().getPath()));
+ final Map<?, ?> json = run(op, src,
+ new DestinationParam(makeQualified(dst).toUri().getPath()));
return (Boolean)json.get("boolean");
}
@@ -298,7 +418,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
final Options.Rename... options) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.RENAME;
- run(op, src, new DstPathParam(makeQualified(dst).toUri().getPath()),
+ run(op, src, new DestinationParam(makeQualified(dst).toUri().getPath()),
new RenameOptionSetParam(options));
}
@@ -327,8 +447,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
) throws IOException {
statistics.incrementWriteOps(1);
final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
- final Map<String, Object> json = run(op, p,
- new ReplicationParam(replication));
+ final Map<?, ?> json = run(op, p, new ReplicationParam(replication));
return (Boolean)json.get("boolean");
}
@@ -340,6 +459,18 @@ public class WebHdfsFileSystem extends HftpFileSystem {
run(op, p, new ModificationTimeParam(mtime), new AccessTimeParam(atime));
}
+ @Override
+ public long getDefaultBlockSize() {
+ return getConf().getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+ DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
+ }
+
+ @Override
+ public short getDefaultReplication() {
+ return (short)getConf().getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+ DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+ }
+
private FSDataOutputStream write(final HttpOpParam.Op op,
final HttpURLConnection conn, final int bufferSize) throws IOException {
return new FSDataOutputStream(new BufferedOutputStream(
@@ -382,10 +513,16 @@ public class WebHdfsFileSystem extends HftpFileSystem {
return write(op, conn, bufferSize);
}
+ @SuppressWarnings("deprecation")
+ @Override
+ public boolean delete(final Path f) throws IOException {
+ return delete(f, true);
+ }
+
@Override
public boolean delete(Path f, boolean recursive) throws IOException {
final HttpOpParam.Op op = DeleteOpParam.Op.DELETE;
- final Map<String, Object> json = run(op, f, new RecursiveParam(recursive));
+ final Map<?, ?> json = run(op, f, new RecursiveParam(recursive));
return (Boolean)json.get("boolean");
}
@@ -395,7 +532,24 @@ public class WebHdfsFileSystem extends HftpFileSystem {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.OPEN;
final URL url = toUrl(op, f, new BufferSizeParam(buffersize));
- return new FSDataInputStream(new ByteRangeInputStream(url));
+ ByteRangeInputStream str = getByteRangeInputStream(url);
+ return new FSDataInputStream(str);
+ }
+
+ private class URLOpener extends ByteRangeInputStream.URLOpener {
+
+ public URLOpener(URL u) {
+ super(u);
+ }
+
+ @Override
+ public HttpURLConnection openConnection() throws IOException {
+ return getHttpUrlConnection(offsetUrl);
+ }
+ }
+
+ private ByteRangeInputStream getByteRangeInputStream(URL url) {
+ return new ByteRangeInputStream(new URLOpener(url), new URLOpener(null));
}
@Override
@@ -404,24 +558,24 @@ public class WebHdfsFileSystem extends HftpFileSystem {
final HttpOpParam.Op op = GetOpParam.Op.LISTSTATUS;
final Map<?, ?> json = run(op, f);
- final Object[] array = (Object[])json.get(
- HdfsFileStatus[].class.getSimpleName());
+ final Map<?, ?> rootmap = (Map<?, ?>)json.get(HdfsFileStatus.class.getSimpleName() + "es");
+ final Object[] array = (Object[])rootmap.get(HdfsFileStatus.class.getSimpleName());
//convert FileStatus
final FileStatus[] statuses = new FileStatus[array.length];
for(int i = 0; i < array.length; i++) {
- @SuppressWarnings("unchecked")
- final Map<String, Object> m = (Map<String, Object>)array[i];
- statuses[i] = makeQualified(JsonUtil.toFileStatus(m), f);
+ final Map<?, ?> m = (Map<?, ?>)array[i];
+ statuses[i] = makeQualified(JsonUtil.toFileStatus(m, false), f);
}
return statuses;
}
+ @SuppressWarnings("deprecation")
@Override
public Token<DelegationTokenIdentifier> getDelegationToken(final String renewer
) throws IOException {
final HttpOpParam.Op op = GetOpParam.Op.GETDELEGATIONTOKEN;
- final Map<String, Object> m = run(op, null, new RenewerParam(renewer));
+ final Map<?, ?> m = run(op, null, new RenewerParam(renewer));
final Token<DelegationTokenIdentifier> token = JsonUtil.toDelegationToken(m);
token.setService(new Text(getCanonicalServiceName()));
return token;
@@ -434,6 +588,45 @@ public class WebHdfsFileSystem extends HftpFileSystem {
return Arrays.asList(t);
}
+ @Override
+ public Token<?> getRenewToken() {
+ return renewToken;
+ }
+
+ @Override
+ public <T extends TokenIdentifier> void setDelegationToken(
+ final Token<T> token) {
+ synchronized(this) {
+ renewToken = token;
+ // emulate the 203 usage of the tokens
+ // by setting the kind and service as if they were hdfs tokens
+ delegationToken = new Token<T>(token);
+ // NOTE: the remote nn must be configured to use hdfs
+ delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+ // no need to change service because we aren't exactly sure what it
+ // should be. we can guess, but it might be wrong if the local conf
+ // value is incorrect. the service is a client side field, so the remote
+ // end does not care about the value
+ }
+ }
+
+ private synchronized long renewDelegationToken(final Token<?> token
+ ) throws IOException {
+ final HttpOpParam.Op op = PutOpParam.Op.RENEWDELEGATIONTOKEN;
+ TokenArgumentParam dtargParam = new TokenArgumentParam(
+ token.encodeToUrlString());
+ final Map, ?> m = run(op, null, dtargParam);
+ return (Long) m.get("long");
+ }
+
+ private synchronized void cancelDelegationToken(final Token<?> token
+ ) throws IOException {
+ final HttpOpParam.Op op = PutOpParam.Op.CANCELDELEGATIONTOKEN;
+ TokenArgumentParam dtargParam = new TokenArgumentParam(
+ token.encodeToUrlString());
+ run(op, null, dtargParam);
+ }
+
@Override
public BlockLocation[] getFileBlockLocations(final FileStatus status,
final long offset, final long length) throws IOException {
@@ -449,7 +642,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.GETFILEBLOCKLOCATIONS;
- final Map<String, Object> m = run(op, p, new OffsetParam(offset),
+ final Map<?, ?> m = run(op, p, new OffsetParam(offset),
new LengthParam(length));
return DFSUtil.locatedBlocks2Locations(JsonUtil.toLocatedBlocks(m));
}
@@ -459,7 +652,7 @@ public class WebHdfsFileSystem extends HftpFileSystem {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.GETCONTENTSUMMARY;
- final Map<String, Object> m = run(op, p);
+ final Map<?, ?> m = run(op, p);
return JsonUtil.toContentSummary(m);
}
@@ -469,7 +662,69 @@ public class WebHdfsFileSystem extends HftpFileSystem {
statistics.incrementReadOps(1);
final HttpOpParam.Op op = GetOpParam.Op.GETFILECHECKSUM;
- final Map<String, Object> m = run(op, p);
+ final Map<?, ?> m = run(op, p);
return JsonUtil.toMD5MD5CRC32FileChecksum(m);
}
-}
\ No newline at end of file
+
+ private static final DtSelector webhdfspTokenSelector = new DtSelector();
+
+ private static class DtSelector
+ extends AbstractDelegationTokenSelector<DelegationTokenIdentifier> {
+ private DtSelector() {
+ super(TOKEN_KIND);
+ }
+ }
+
+ /** Delegation token renewer. */
+ public static class DtRenewer extends TokenRenewer {
+ @Override
+ public boolean handleKind(Text kind) {
+ return kind.equals(TOKEN_KIND);
+ }
+
+ @Override
+ public boolean isManaged(Token<?> token) throws IOException {
+ return true;
+ }
+
+ private static WebHdfsFileSystem getWebHdfs(
+ final Token<?> token, final Configuration conf
+ ) throws IOException, InterruptedException, URISyntaxException {
+
+ final InetSocketAddress nnAddr = NetUtils.createSocketAddr(
+ token.getService().toString());
+ final URI uri = DFSUtil.createUri(WebHdfsFileSystem.SCHEME, nnAddr);
+ return (WebHdfsFileSystem)FileSystem.get(uri, conf);
+ }
+
+ @Override
+ public long renew(final Token<?> token, final Configuration conf
+ ) throws IOException, InterruptedException {
+ final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+ // update the kerberos credentials, if they are coming from a keytab
+ ugi.checkTGTAndReloginFromKeytab();
+
+ try {
+ WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
+ return webhdfs.renewDelegationToken(token);
+ } catch (URISyntaxException e) {
+ throw new IOException(e);
+ }
+ }
+
+ @Override
+ public void cancel(final Token<?> token, final Configuration conf
+ ) throws IOException, InterruptedException {
+ final UserGroupInformation ugi = UserGroupInformation.getLoginUser();
+ // update the kerberos credentials, if they are coming from a keytab
+ ugi.checkTGTAndReloginFromKeytab();
+
+ try {
+ final WebHdfsFileSystem webhdfs = getWebHdfs(token, conf);
+ webhdfs.cancelDelegationToken(token);
+ } catch (URISyntaxException e) {
+ throw new IOException(e);
+ }
+ }
+ }
+}
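
With WebHdfsFileSystem now a stand-alone FileSystem (scheme "webhdfs", REST prefix /webhdfs/v1), a client can address the namenode's HTTP port directly. A hedged usage sketch: host, port and paths are placeholders, and it assumes fs.webhdfs.impl is mapped to this class in the cluster configuration:

  import java.io.BufferedReader;
  import java.io.InputStreamReader;
  import java.net.URI;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;

  public class WebHdfsClientSketch {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(
          URI.create("webhdfs://namenode.example.com:50070/"), conf);
      for (FileStatus s : fs.listStatus(new Path("/user"))) {
        System.out.println(s.getPath() + " " + s.getLen());
      }
      try (BufferedReader in = new BufferedReader(
          new InputStreamReader(fs.open(new Path("/user/example.txt"))))) {
        System.out.println(in.readLine());
      }
    }
  }
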
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
index 8d82131c703..9bc938dee5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/AccessTimeParam.java
@@ -31,7 +31,7 @@ public class AccessTimeParam extends LongParam {
* @param value the parameter value.
*/
public AccessTimeParam(final Long value) {
- super(DOMAIN, value);
+ super(DOMAIN, value, -1L, null);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
index 96114968074..4076746e34e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BlockSizeParam.java
@@ -36,7 +36,7 @@ public class BlockSizeParam extends LongParam {
* @param value the parameter value.
*/
public BlockSizeParam(final Long value) {
- super(DOMAIN, value);
+ super(DOMAIN, value, 1L, null);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
index 148834b1024..376d7d8ef0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/BufferSizeParam.java
@@ -34,7 +34,7 @@ public class BufferSizeParam extends IntegerParam {
* @param value the parameter value.
*/
public BufferSizeParam(final Integer value) {
- super(DOMAIN, value);
+ super(DOMAIN, value, 1, null);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
index ad08773ea24..57be43e58f0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DelegationParam.java
@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.web.resources;
import org.apache.hadoop.security.UserGroupInformation;
-/** Delegation token parameter. */
+/** Represents delegation token used for authentication. */
public class DelegationParam extends StringParam {
/** Parameter name. */
public static final String NAME = "delegation";
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
new file mode 100644
index 00000000000..67597385da2
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DestinationParam.java
@@ -0,0 +1,54 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.hadoop.fs.Path;
+
+/** Destination path parameter. */
+public class DestinationParam extends StringParam {
+ /** Parameter name. */
+ public static final String NAME = "destination";
+ /** Default parameter value. */
+ public static final String DEFAULT = "";
+
+ private static final Domain DOMAIN = new Domain(NAME, null);
+
+ private static String validate(final String str) {
+ if (str == null || str.equals(DEFAULT)) {
+ return null;
+ }
+ if (!str.startsWith(Path.SEPARATOR)) {
+ throw new IllegalArgumentException("Invalid parameter value: " + NAME
+ + " = \"" + str + "\" is not an absolute path.");
+ }
+ return new Path(str).toUri().getPath();
+ }
+
+ /**
+ * Constructor.
+ * @param str a string representation of the parameter value.
+ */
+ public DestinationParam(final String str) {
+ super(DOMAIN, validate(str));
+ }
+
+ @Override
+ public String getName() {
+ return NAME;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
index 8a04c4ad918..bd0003ef0e9 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ExceptionHandler.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.web.resources;
import java.io.FileNotFoundException;
import java.io.IOException;
+import javax.servlet.http.HttpServletResponse;
+import javax.ws.rs.core.Context;
import javax.ws.rs.core.MediaType;
import javax.ws.rs.core.Response;
import javax.ws.rs.ext.ExceptionMapper;
@@ -29,17 +31,33 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hdfs.web.JsonUtil;
+import com.sun.jersey.api.ParamException;
+
/** Handle exceptions. */
@Provider
public class ExceptionHandler implements ExceptionMapper<Exception> {
public static final Log LOG = LogFactory.getLog(ExceptionHandler.class);
+ private @Context HttpServletResponse response;
+
@Override
- public Response toResponse(final Exception e) {
+ public Response toResponse(Exception e) {
if (LOG.isTraceEnabled()) {
LOG.trace("GOT EXCEPITION", e);
}
+ //clear content type
+ response.setContentType(null);
+
+ //Convert exception
+ if (e instanceof ParamException) {
+ final ParamException paramexception = (ParamException)e;
+ e = new IllegalArgumentException("Invalid value for webhdfs parameter \""
+ + paramexception.getParameterName() + "\": "
+ + e.getCause().getMessage(), e);
+ }
+
+ //Map response status
final Response.Status s;
if (e instanceof SecurityException) {
s = Response.Status.UNAUTHORIZED;
@@ -49,7 +67,10 @@ public class ExceptionHandler implements ExceptionMapper<Exception> {
s = Response.Status.FORBIDDEN;
} else if (e instanceof UnsupportedOperationException) {
s = Response.Status.BAD_REQUEST;
+ } else if (e instanceof IllegalArgumentException) {
+ s = Response.Status.BAD_REQUEST;
} else {
+ LOG.warn("INTERNAL_SERVER_ERROR", e);
s = Response.Status.INTERNAL_SERVER_ERROR;
}
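
Combined with the JsonUtil change above, a bad webhdfs parameter now surfaces as HTTP 400 with a RemoteException body carrying "exception", "javaClassName" and "message". A sketch of reading such a body on the client side; the JSON literal is hand-written for illustration, not captured from a server:

  import java.util.Map;
  import org.mortbay.util.ajax.JSON;

  public class ErrorBodySketch {
    public static void main(String[] args) {
      String body = "{\"RemoteException\":{"
          + "\"exception\":\"IllegalArgumentException\","
          + "\"javaClassName\":\"java.lang.IllegalArgumentException\","
          + "\"message\":\"Invalid value for webhdfs parameter permission\"}}";
      Map<?, ?> outer = (Map<?, ?>) JSON.parse(body);
      Map<?, ?> re = (Map<?, ?>) outer.get("RemoteException");
      System.out.println(re.get("exception") + ": " + re.get("message"));
    }
  }
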
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
index 5e890876106..b80b1a254aa 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/IntegerParam.java
@@ -19,8 +19,24 @@ package org.apache.hadoop.hdfs.web.resources;
/** Integer parameter. */
abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
- IntegerParam(final Domain domain, final Integer value) {
+ IntegerParam(final Domain domain, final Integer value,
+ final Integer min, final Integer max) {
super(domain, value);
+ checkRange(min, max);
+ }
+
+ private void checkRange(final Integer min, final Integer max) {
+ if (value == null) {
+ return;
+ }
+ if (min != null && value < min) {
+ throw new IllegalArgumentException("Invalid parameter range: " + getName()
+ + " = " + domain.toString(value) + " < " + domain.toString(min));
+ }
+ if (max != null && value > max) {
+ throw new IllegalArgumentException("Invalid parameter range: " + getName()
+ + " = " + domain.toString(value) + " > " + domain.toString(max));
+ }
}
@Override
@@ -49,7 +65,12 @@ abstract class IntegerParam extends Param<Integer, IntegerParam.Domain> {
@Override
Integer parse(final String str) {
- return NULL.equals(str)? null: Integer.parseInt(str, radix);
+ try{
+ return NULL.equals(str)? null: Integer.parseInt(str, radix);
+ } catch(NumberFormatException e) {
+ throw new IllegalArgumentException("Failed to parse \"" + str
+ + "\" as a radix-" + radix + " integer.", e);
+ }
}
/** Convert an Integer to a String. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
index 90d4f6289d9..6c59ee51432 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LengthParam.java
@@ -31,7 +31,7 @@ public class LengthParam extends LongParam {
* @param value the parameter value.
*/
public LengthParam(final Long value) {
- super(DOMAIN, value);
+ super(DOMAIN, value, 0L, null);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
index 8a3e0f5e413..023402cfe01 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/LongParam.java
@@ -19,8 +19,23 @@ package org.apache.hadoop.hdfs.web.resources;
/** Long parameter. */
abstract class LongParam extends Param<Long, LongParam.Domain> {
- LongParam(final Domain domain, final Long value) {
+ LongParam(final Domain domain, final Long value, final Long min, final Long max) {
super(domain, value);
+ checkRange(min, max);
+ }
+
+ private void checkRange(final Long min, final Long max) {
+ if (value == null) {
+ return;
+ }
+ if (min != null && value < min) {
+ throw new IllegalArgumentException("Invalid parameter range: " + getName()
+ + " = " + domain.toString(value) + " < " + domain.toString(min));
+ }
+ if (max != null && value > max) {
+ throw new IllegalArgumentException("Invalid parameter range: " + getName()
+ + " = " + domain.toString(value) + " > " + domain.toString(max));
+ }
}
@Override
@@ -49,7 +64,12 @@ abstract class LongParam extends Param<Long, LongParam.Domain> {
@Override
Long parse(final String str) {
- return NULL.equals(str)? null: Long.parseLong(str, radix);
+ try {
+ return NULL.equals(str)? null: Long.parseLong(str, radix);
+ } catch(NumberFormatException e) {
+ throw new IllegalArgumentException("Failed to parse \"" + str
+ + "\" as a radix-" + radix + " long integer.", e);
+ }
}
/** Convert a Short to a String. */
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
index a0e38a97e7d..59911d70b88 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ModificationTimeParam.java
@@ -31,7 +31,7 @@ public class ModificationTimeParam extends LongParam {
* @param value the parameter value.
*/
public ModificationTimeParam(final Long value) {
- super(DOMAIN, value);
+ super(DOMAIN, value, -1L, null);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
index 8b3654dbd87..6973787847d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/OffsetParam.java
@@ -31,7 +31,7 @@ public class OffsetParam extends LongParam {
* @param value the parameter value.
*/
public OffsetParam(final Long value) {
- super(DOMAIN, value);
+ super(DOMAIN, value, 0L, null);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
index 264e60226bf..d283423fa0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PermissionParam.java
@@ -27,13 +27,15 @@ public class PermissionParam extends ShortParam {
public static final String DEFAULT = NULL;
private static final Domain DOMAIN = new Domain(NAME, 8);
+
+ private static final short DEFAULT_PERMISSION = 0755;
/**
* Constructor.
* @param value the parameter value.
*/
public PermissionParam(final FsPermission value) {
- super(DOMAIN, value == null? null: value.toShort());
+ super(DOMAIN, value == null? null: value.toShort(), null, null);
}
/**
@@ -41,7 +43,7 @@ public class PermissionParam extends ShortParam {
* @param str a string representation of the parameter value.
*/
public PermissionParam(final String str) {
- super(DOMAIN, DOMAIN.parse(str));
+ super(DOMAIN, DOMAIN.parse(str), (short)0, (short)01777);
}
@Override
@@ -51,7 +53,7 @@ public class PermissionParam extends ShortParam {
/** @return the represented FsPermission. */
public FsPermission getFsPermission() {
- final Short mode = getValue();
- return mode == null? FsPermission.getDefault(): new FsPermission(mode);
+ final Short v = getValue();
+ return new FsPermission(v != null? v: DEFAULT_PERMISSION);
}
}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
index dcfaa6f06cd..45119a93805 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PutOpParam.java
@@ -33,6 +33,9 @@ public class PutOpParam extends HttpOpParam {
SETPERMISSION(false, HttpURLConnection.HTTP_OK),
SETTIMES(false, HttpURLConnection.HTTP_OK),
+ RENEWDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
+ CANCELDELEGATIONTOKEN(false, HttpURLConnection.HTTP_OK),
+
NULL(false, HttpURLConnection.HTTP_NOT_IMPLEMENTED);
final boolean doOutput;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
index e13aec8115b..797709abdec 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ReplicationParam.java
@@ -17,6 +17,11 @@
*/
package org.apache.hadoop.hdfs.web.resources;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_DEFAULT;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_REPLICATION_KEY;
+
+import org.apache.hadoop.conf.Configuration;
+
/** Replication parameter. */
public class ReplicationParam extends ShortParam {
/** Parameter name. */
@@ -31,7 +36,7 @@ public class ReplicationParam extends ShortParam {
* @param value the parameter value.
*/
public ReplicationParam(final Short value) {
- super(DOMAIN, value);
+ super(DOMAIN, value, (short)1, null);
}
/**
@@ -46,4 +51,10 @@ public class ReplicationParam extends ShortParam {
public String getName() {
return NAME;
}
+
+ /** @return the value or, if it is null, return the default from conf. */
+ public short getValue(final Configuration conf) {
+ return getValue() != null? getValue()
+ : (short)conf.getInt(DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
+ }
}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
index af3e72f6876..c1749cf18eb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/ShortParam.java
@@ -19,8 +19,24 @@ package org.apache.hadoop.hdfs.web.resources;
/** Short parameter. */
abstract class ShortParam extends Param<Short, ShortParam.Domain> {
- ShortParam(final Domain domain, final Short value) {
+ ShortParam(final Domain domain, final Short value,
+ final Short min, final Short max) {
super(domain, value);
+ checkRange(min, max);
+ }
+
+ private void checkRange(final Short min, final Short max) {
+ if (value == null) {
+ return;
+ }
+ if (min != null && value < min) {
+ throw new IllegalArgumentException("Invalid parameter range: " + getName()
+ + " = " + domain.toString(value) + " < " + domain.toString(min));
+ }
+ if (max != null && value > max) {
+ throw new IllegalArgumentException("Invalid parameter range: " + getName()
+ + " = " + domain.toString(value) + " > " + domain.toString(max));
+ }
}
@Override
@@ -49,7 +65,12 @@ abstract class ShortParam extends Param<Short, ShortParam.Domain> {
@Override
Short parse(final String str) {
- return NULL.equals(str)? null: Short.parseShort(str, radix);
+ try {
+ return NULL.equals(str)? null: Short.parseShort(str, radix);
+ } catch(NumberFormatException e) {
+ throw new IllegalArgumentException("Failed to parse \"" + str
+ + "\" as a radix-" + radix + " short integer.", e);
+ }
}
/** Convert a Short to a String. */
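
IntegerParam, LongParam and ShortParam now take optional min/max bounds and reject out-of-range or unparseable values with IllegalArgumentException, which ExceptionHandler maps to HTTP 400. A self-contained sketch of the bounded-parameter pattern; the class and method names are illustrative, not the actual webhdfs classes:

  public class BoundedLongParamSketch {
    static Long checkRange(String name, Long value, Long min, Long max) {
      if (value != null) {
        if (min != null && value < min) {
          throw new IllegalArgumentException("Invalid parameter range: "
              + name + " = " + value + " < " + min);
        }
        if (max != null && value > max) {
          throw new IllegalArgumentException("Invalid parameter range: "
              + name + " = " + value + " > " + max);
        }
      }
      return value;
    }

    public static void main(String[] args) {
      checkRange("blocksize", 134217728L, 1L, null); // accepted
      checkRange("offset", null, 0L, null);          // null skips the check
      try {
        checkRange("offset", -5L, 0L, null);         // rejected
      } catch (IllegalArgumentException e) {
        System.out.println(e.getMessage());          // surfaces as HTTP 400
      }
    }
  }
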
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
similarity index 73%
rename from hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
index 5fa52456f92..53b38ac67ab 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/DstPathParam.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/TokenArgumentParam.java
@@ -17,12 +17,13 @@
*/
package org.apache.hadoop.hdfs.web.resources;
-import org.apache.hadoop.fs.Path;
-
-/** Destination path parameter. */
-public class DstPathParam extends StringParam {
+/**
+ * Represents delegation token parameter as method arguments. This is
+ * different from {@link DelegationParam}.
+ */
+public class TokenArgumentParam extends StringParam {
/** Parameter name. */
- public static final String NAME = "dstpath";
+ public static final String NAME = "token";
/** Default parameter value. */
public static final String DEFAULT = "";
@@ -30,10 +31,10 @@ public class DstPathParam extends StringParam {
/**
* Constructor.
- * @param str a string representation of the parameter value.
+ * @param str A string representation of the parameter value.
*/
- public DstPathParam(final String str) {
- super(DOMAIN, str == null || str.equals(DEFAULT)? null: new Path(str).toUri().getPath());
+ public TokenArgumentParam(final String str) {
+ super(DOMAIN, str != null && !str.equals(DEFAULT) ? str : null);
}
@Override
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
index 813b64bfcce..74070243c0d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/UserProvider.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.web.resources;
import java.io.IOException;
import java.lang.reflect.Type;
+import javax.servlet.ServletContext;
import javax.servlet.http.HttpServletRequest;
import javax.ws.rs.core.Context;
import javax.ws.rs.ext.Provider;
@@ -42,13 +43,14 @@ public class UserProvider
extends AbstractHttpContextInjectable<UserGroupInformation>
implements InjectableProvider<Context, Type> {
@Context HttpServletRequest request;
+ @Context ServletContext servletcontext;
@Override
public UserGroupInformation getValue(final HttpContext context) {
- final Configuration conf = (Configuration)context.getProperties().get(
- JspHelper.CURRENT_CONF);
+ final Configuration conf = (Configuration) servletcontext
+ .getAttribute(JspHelper.CURRENT_CONF);
try {
- return JspHelper.getUGI(null, request, conf,
+ return JspHelper.getUGI(servletcontext, request, conf,
AuthenticationMethod.KERBEROS, false);
} catch (IOException e) {
throw new RuntimeException(e);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
new file mode 100644
index 00000000000..20addd74b00
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
@@ -0,0 +1,4 @@
+org.apache.hadoop.hdfs.DFSClient$Renewer
+org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier$Renewer
+org.apache.hadoop.hdfs.HftpFileSystem$TokenManager
+org.apache.hadoop.hdfs.web.WebHdfsFileSystem$DtRenewer
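
This services file is how Token.isManaged(), renew() and cancel() locate the right handler: the Token class discovers TokenRenewer implementations through java.util.ServiceLoader, so registering WebHdfsFileSystem$DtRenewer here is what makes WEBHDFS delegation tokens renewable. A small sketch listing what the loader can see on the classpath:

  import java.util.ServiceLoader;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.security.token.TokenRenewer;

  public class RenewerDiscoverySketch {
    public static void main(String[] args) {
      Text webhdfsKind = new Text("WEBHDFS delegation"); // WebHdfsFileSystem.TOKEN_KIND
      for (TokenRenewer renewer : ServiceLoader.load(TokenRenewer.class)) {
        System.out.println(renewer.getClass().getName()
            + " handles WEBHDFS tokens: " + renewer.handleKind(webhdfsKind));
      }
    }
  }
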
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto b/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto
index 7c5f859b223..316c05cea98 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/proto/datatransfer.proto
@@ -40,6 +40,17 @@ message OpReadBlockProto {
required uint64 offset = 2;
required uint64 len = 3;
}
+
+
+message ChecksumProto {
+ enum ChecksumType {
+ NULL = 0;
+ CRC32 = 1;
+ CRC32C = 2;
+ }
+ required ChecksumType type = 1;
+ required uint32 bytesPerChecksum = 2;
+}
message OpWriteBlockProto {
required ClientOperationHeaderProto header = 1;
@@ -69,6 +80,11 @@ message OpWriteBlockProto {
required uint64 minBytesRcvd = 6;
required uint64 maxBytesRcvd = 7;
required uint64 latestGenerationStamp = 8;
+
+ /**
+ * The requested checksum mechanism for this block write.
+ */
+ required ChecksumProto requestedChecksum = 9;
}
message OpTransferBlockProto {
@@ -114,11 +130,30 @@ message PipelineAckProto {
repeated Status status = 2;
}
+/**
+ * Sent as part of the BlockOpResponseProto
+ * for READ_BLOCK and COPY_BLOCK operations.
+ */
+message ReadOpChecksumInfoProto {
+ required ChecksumProto checksum = 1;
+
+ /**
+ * The offset into the block at which the first packet
+ * will start. This is necessary since reads will align
+ * backwards to a checksum chunk boundary.
+ */
+ required uint64 chunkOffset = 2;
+}
+
message BlockOpResponseProto {
required Status status = 1;
optional string firstBadLink = 2;
optional OpBlockChecksumResponseProto checksumResponse = 3;
+ optional ReadOpChecksumInfoProto readOpChecksumInfo = 4;
+
+ /** explanatory text which may be useful to log on the client side */
+ optional string message = 5;
}
/**
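
The new ChecksumProto lets a writer state the checksum algorithm and chunk size for a block write (OpWriteBlockProto.requestedChecksum), and ReadOpChecksumInfoProto tells a reader how the returned stream is chunk-aligned. A hedged sketch of building the message from Java, assuming the generated outer class for datatransfer.proto is DataTransferProtos (the java_outer_classname option is not shown in this hunk):

  import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto;
  import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ChecksumProto.ChecksumType;

  public class ChecksumProtoSketch {
    public static void main(String[] args) {
      // A writer requesting CRC32C over 512-byte chunks for an OpWriteBlockProto.
      ChecksumProto requested = ChecksumProto.newBuilder()
          .setType(ChecksumType.CRC32C)
          .setBytesPerChecksum(512)
          .build();
      System.out.println(requested);
    }
  }
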
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto b/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto
index d11dbfaebc2..a77a7c312e8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/proto/hdfs.proto
@@ -23,13 +23,19 @@ option java_package = "org.apache.hadoop.hdfs.protocol.proto";
option java_outer_classname = "HdfsProtos";
option java_generate_equals_and_hash = true;
+/**
+ * Extended block identifies a block
+ */
message ExtendedBlockProto {
- required string poolId = 1;
- required uint64 blockId = 2;
- required uint64 numBytes = 3;
- required uint64 generationStamp = 4;
+ required string poolId = 1; // Block pool id - globally unique across clusters
+ required uint64 blockId = 2; // the local id within a pool
+ required uint64 generationStamp = 3;
+ optional uint64 numBytes = 4; // block len does not belong in ebid - here for historical reasons
}
+/**
+ * Block Token
+ */
message BlockTokenIdentifierProto {
required bytes identifier = 1;
required bytes password = 2;
@@ -37,12 +43,20 @@ message BlockTokenIdentifierProto {
required string service = 4;
}
+/**
+ * Identifies a Datanode
+ */
message DatanodeIDProto {
- required string name = 1;
- required string storageID = 2;
- required uint32 infoPort = 3;
+ required string name = 1; // hostname:portNumber
+ required string storageID = 2; // Unique storage id
+ required uint32 infoPort = 3; // the port where the infoserver is running
+ required uint32 ipcPort = 4; // the port where the ipc Server is running
}
+
+/**
+ * The status of a Datanode
+ */
message DatanodeInfoProto {
required DatanodeIDProto id = 1;
optional uint64 capacity = 2;
@@ -62,3 +76,116 @@ message DatanodeInfoProto {
optional AdminState adminState = 10;
}
+
+/**
+ * Summary of a file or directory
+ */
+message ContentSummaryProto {
+ required uint64 length = 1;
+ required uint64 fileCount = 2;
+ required uint64 directoryCount = 3;
+ required uint64 quota = 4;
+ required uint64 spaceConsumed = 5;
+ required uint64 spaceQuota = 6;
+}
+
+/**
+ * Contains a list of paths corresponding to corrupt files and a cookie
+ * used for iterative calls to NameNode.listCorruptFileBlocks.
+ *
+ */
+message CorruptFileBlocksProto {
+ repeated string files = 1;
+ required string cookie = 2;
+}
+
+/**
+ * File or directory permission - same spec as POSIX
+ */
+message FsPermissionProto {
+ required uint32 perm = 1; // Actually a short - only 16bits used
+}
+
+
+/**
+ * A LocatedBlock gives information about a block and its location.
+ */
+message LocatedBlockProto {
+ required ExtendedBlockProto b = 1;
+ required uint64 offset = 2; // offset of first byte of block in the file
+ repeated DatanodeInfoProto locs = 3; // Locations ordered by proximity to client ip
+ required bool corrupt = 4; // true if all replicas of a block are corrupt, else false
+ // If the block has a few corrupt replicas, they are filtered out and
+ // their locations are not part of this object
+
+ required BlockTokenIdentifierProto blockToken = 5;
+ }
+
+
+/**
+ * A set of file blocks and their locations.
+ */
+message LocatedBlocksProto {
+ required uint64 fileLength = 1;
+ repeated LocatedBlockProto blocks = 2;
+ required bool underConstruction = 3;
+ optional LocatedBlockProto lastBlock = 4;
+ required bool isLastBlockComplete = 5;
+}
+
+
+/**
+ * Status of a file, directory or symlink
+ * Optionally includes a file's block locations if requested by client on the rpc call.
+ */
+message HdfsFileStatusProto {
+ enum FileType {
+ IS_DIR = 1;
+ IS_FILE = 2;
+ IS_SYMLINK = 3;
+ }
+ required FileType fileType = 1;
+ required bytes path = 2; // local name of inode, encoded in Java UTF-8
+ required uint64 length = 3;
+ required FsPermissionProto permission = 4;
+ required string owner = 5;
+ required string group = 6;
+ required uint64 modification_time = 7;
+ required uint64 access_time = 8;
+ //
+ // Optional fields for symlink
+ optional bytes symlink = 9; // if a symlink, the target path, encoded in Java UTF-8
+ //
+ // Optional fields for file
+ optional uint32 block_replication = 10; // Actually a short - only 16bits used
+ optional uint64 blocksize = 11;
+ optional LocatedBlocksProto locations = 12; // supplied only if requested by the client
+}
+
+/**
+ * HDFS Server Defaults
+ */
+message FsServerDefaultsProto {
+ required uint64 blockSize = 1;
+ required uint32 bytesPerChecksum = 2;
+ required uint32 writePacketSize = 3;
+ required uint32 replication = 4; // Actually a short - only 16bits used
+ required uint32 fileBufferSize = 5;
+}
+
+
+/**
+ * Directory listing
+ */
+message DirectoryListingProto {
+ repeated HdfsFileStatusProto partialListing = 1;
+ required uint32 remainingEntries = 2;
+}
+
+/**
+ * Status of current cluster upgrade from one version to another
+ */
+message UpgradeStatusReportProto {
+ required uint32 version = 1;
+ required uint32 upgradeStatus = 2; // Between 0 and 100 indicating the % complete
+}
diff --git a/hadoop-mapreduce-project/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj
similarity index 53%
rename from hadoop-mapreduce-project/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java
rename to hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj
index 75d7e811bd7..c762e323856 100644
--- a/hadoop-mapreduce-project/src/contrib/fairscheduler/src/java/org/apache/hadoop/mapred/FifoJobComparator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/DataXceiverAspects.aj
@@ -15,29 +15,27 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+package org.apache.hadoop.hdfs.server.datanode;
-package org.apache.hadoop.mapred;
-
-import java.util.Comparator;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
/**
- * Order {@link JobInProgress} objects by priority and then by submit time, as
- * in the default scheduler in Hadoop.
+ * This aspect handles faults injected into the datanode.DataXceiver
+ * class.
*/
-public class FifoJobComparator implements Comparator {
- public int compare(JobInProgress j1, JobInProgress j2) {
- int res = j1.getPriority().compareTo(j2.getPriority());
- if (res == 0) {
- if (j1.getStartTime() < j2.getStartTime()) {
- res = -1;
- } else {
- res = (j1.getStartTime() == j2.getStartTime() ? 0 : 1);
- }
+privileged public aspect DataXceiverAspects {
+ public static final Log LOG = LogFactory.getLog(DataXceiverAspects.class);
+
+ pointcut runXceiverThread(DataXceiver xceiver) :
+ execution (* run(..)) && target(xceiver);
+
+ void around (DataXceiver xceiver) : runXceiverThread(xceiver) {
+ if ("true".equals(System.getProperty("fi.enabledOOM"))) {
+ LOG.info("fi.enabledOOM is enabled");
+ throw new OutOfMemoryError("Pretend there's no more memory");
+ } else {
+ proceed(xceiver);
}
- if (res == 0) {
- // If there is a tie, break it by job ID to get a deterministic order
- res = j1.getJobID().compareTo(j2.getJobID());
- }
- return res;
}
-}
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataXceiverServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataXceiverServer.java
new file mode 100644
index 00000000000..2f92fcf6ec0
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataXceiverServer.java
@@ -0,0 +1,97 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetAddress;
+import java.net.InetSocketAddress;
+import java.net.ServerSocket;
+import java.net.Socket;
+import java.net.SocketAddress;
+import java.util.concurrent.CountDownLatch;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * This is a test for DataXceiverServer when DataXceiver thread spawning
+ * fails due to OutOfMemoryError. The expected behavior is that DataXceiverServer
+ * does not exit; it should retry again after 30 seconds.
+ */
+public class TestFiDataXceiverServer {
+
+ @Test(timeout = 30000)
+ public void testOutOfMemoryErrorInDataXceiverServerRun() throws Exception {
+ final CountDownLatch latch = new CountDownLatch(1);
+ ServerSocket sock = new ServerSocket() {
+ @Override
+ public Socket accept() throws IOException {
+ return new Socket() {
+ @Override
+ public InetAddress getInetAddress() {
+ return super.getLocalAddress();
+ }
+
+ @Override
+ public SocketAddress getRemoteSocketAddress() {
+ return new InetSocketAddress(8080);
+ }
+
+ @Override
+ public SocketAddress getLocalSocketAddress() {
+ return new InetSocketAddress(0);
+ }
+
+ @Override
+ public synchronized void close() throws IOException {
+ latch.countDown();
+ super.close();
+ }
+
+ @Override
+ public InputStream getInputStream() throws IOException {
+ return null;
+ }
+ };
+ }
+ };
+ Thread thread = null;
+ System.setProperty("fi.enabledOOM", "true");
+ DataNode dn = Mockito.mock(DataNode.class);
+ try {
+ Configuration conf = new Configuration();
+ Mockito.doReturn(conf).when(dn).getConf();
+ dn.shouldRun = true;
+ DataXceiverServer server = new DataXceiverServer(sock, conf, dn);
+ thread = new Thread(server);
+ thread.start();
+ latch.await();
+ assertTrue("Not running the thread", thread.isAlive());
+ } finally {
+ System.setProperty("fi.enabledOOM", "false");
+ dn.shouldRun = false;
+ if (null != thread)
+ thread.interrupt();
+ sock.close();
+ }
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
index 9ee4a6f186a..eb62f4063d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cli/TestHDFSCLI.java
@@ -94,7 +94,9 @@ public class TestHDFSCLI extends CLITestHelperDFS {
return cmd.getExecutor(namenode).executeCommand(cmd.getCmd());
}
- @Test
+ //TODO: The test is failing due to the change in HADOOP-7360.
+ // HDFS-2038 is going to fix it. Disable the test for the moment.
+ //@Test
@Override
public void testAll () {
super.testAll();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
index 3b67f1b4d31..17608ac1f7b 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestResolveHdfsSymlink.java
@@ -105,7 +105,7 @@ public class TestResolveHdfsSymlink {
* @throws IOException
* @throws InterruptedException
*/
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({ "unchecked", "deprecation" })
@Test
public void testFcDelegationToken() throws UnsupportedFileSystemException,
IOException, InterruptedException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
index f6cf5a0c877..8f5f9f8fda8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/MiniDFSCluster.java
@@ -36,6 +36,7 @@ import java.util.Random;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.commons.math.stat.descriptive.rank.Min;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -86,6 +87,10 @@ public class MiniDFSCluster {
private static final String NAMESERVICE_ID_PREFIX = "nameserviceId";
private static final Log LOG = LogFactory.getLog(MiniDFSCluster.class);
+ /** System property to set the data dir: {@value} */
+ public static final String PROP_TEST_BUILD_DATA = "test.build.data";
+ /** Configuration option to set the data dir: {@value} */
+ public static final String HDFS_MINIDFS_BASEDIR = "hdfs.minidfs.basedir";
static { DefaultMetricsSystem.setMiniClusterMode(true); }
@@ -495,7 +500,7 @@ public class MiniDFSCluster {
boolean waitSafeMode, boolean setupHostsFile, boolean federation)
throws IOException {
this.conf = conf;
- base_dir = new File(getBaseDirectory());
+ base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
this.federation = federation;
this.waitSafeMode = waitSafeMode;
@@ -504,7 +509,7 @@ public class MiniDFSCluster {
String rpcEngineName = System.getProperty("hdfs.rpc.engine");
if (rpcEngineName != null && !"".equals(rpcEngineName)) {
- System.out.println("HDFS using RPCEngine: "+rpcEngineName);
+ LOG.info("HDFS using RPCEngine: " + rpcEngineName);
try {
Class<?> rpcEngine = conf.getClassByName(rpcEngineName);
setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
@@ -858,8 +863,8 @@ public class MiniDFSCluster {
// Set up datanode address
setupDatanodeAddress(dnConf, setupHostsFile, checkDataNodeAddrConfig);
if (manageDfsDirs) {
- File dir1 = getStorageDir(i, 0);
- File dir2 = getStorageDir(i, 1);
+ File dir1 = getInstanceStorageDir(i, 0);
+ File dir2 = getInstanceStorageDir(i, 1);
dir1.mkdirs();
dir2.mkdirs();
if (!dir1.isDirectory() || !dir2.isDirectory()) {
@@ -875,17 +880,17 @@ public class MiniDFSCluster {
dnConf.setLong(SimulatedFSDataset.CONFIG_PROPERTY_CAPACITY,
simulatedCapacities[i-curDatanodesNum]);
}
- System.out.println("Starting DataNode " + i + " with "
+ LOG.info("Starting DataNode " + i + " with "
+ DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY + ": "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY));
if (hosts != null) {
dnConf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, hosts[i - curDatanodesNum]);
- System.out.println("Starting DataNode " + i + " with hostname set to: "
+ LOG.info("Starting DataNode " + i + " with hostname set to: "
+ dnConf.get(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY));
}
if (racks != null) {
String name = hosts[i - curDatanodesNum];
- System.out.println("Adding node with hostname : " + name + " to rack "+
+ LOG.info("Adding node with hostname : " + name + " to rack " +
racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(name,
racks[i-curDatanodesNum]);
@@ -903,7 +908,7 @@ public class MiniDFSCluster {
String ipAddr = dn.getSelfAddr().getAddress().getHostAddress();
if (racks != null) {
int port = dn.getSelfAddr().getPort();
- System.out.println("Adding node with IP:port : " + ipAddr + ":" + port+
+ LOG.info("Adding node with IP:port : " + ipAddr + ":" + port +
" to rack " + racks[i-curDatanodesNum]);
StaticMapping.addNodeToRack(ipAddr + ":" + port,
racks[i-curDatanodesNum]);
@@ -1099,7 +1104,7 @@ public class MiniDFSCluster {
* Shutdown all the nodes in the cluster.
*/
public void shutdown() {
- System.out.println("Shutting down the Mini HDFS Cluster");
+ LOG.info("Shutting down the Mini HDFS Cluster");
shutdownDataNodes();
for (NameNodeInfo nnInfo : nameNodes) {
NameNode nameNode = nnInfo.nameNode;
@@ -1139,7 +1144,7 @@ public class MiniDFSCluster {
public synchronized void shutdownNameNode(int nnIndex) {
NameNode nn = nameNodes[nnIndex].nameNode;
if (nn != null) {
- System.out.println("Shutting down the namenode");
+ LOG.info("Shutting down the namenode");
nn.stop();
nn.join();
Configuration conf = nameNodes[nnIndex].conf;
@@ -1183,9 +1188,9 @@ public class MiniDFSCluster {
nameNodes[nnIndex] = new NameNodeInfo(nn, conf);
if (waitActive) {
waitClusterUp();
- System.out.println("Restarted the namenode");
+ LOG.info("Restarted the namenode");
waitActive();
- System.out.println("Cluster is active");
+ LOG.info("Cluster is active");
}
}
@@ -1261,7 +1266,7 @@ public class MiniDFSCluster {
}
DataNodeProperties dnprop = dataNodes.remove(i);
DataNode dn = dnprop.datanode;
- System.out.println("MiniDFSCluster Stopping DataNode " +
+ LOG.info("MiniDFSCluster Stopping DataNode " +
dn.getMachineName() +
" from a total of " + (dataNodes.size() + 1) +
" datanodes.");
@@ -1350,7 +1355,7 @@ public class MiniDFSCluster {
for (int i = dataNodes.size() - 1; i >= 0; i--) {
if (!restartDataNode(i, keepPort))
return false;
- System.out.println("Restarted DataNode " + i);
+ LOG.info("Restarted DataNode " + i);
}
return true;
}
@@ -1377,8 +1382,8 @@ public class MiniDFSCluster {
} catch (IOException ioe) {
// This method above should never throw.
// It only throws IOE since it is exposed via RPC
- throw new AssertionError("Unexpected IOE thrown: "
- + StringUtils.stringifyException(ioe));
+ throw (AssertionError)(new AssertionError("Unexpected IOE thrown: "
+ + StringUtils.stringifyException(ioe)).initCause(ioe));
}
boolean isUp = false;
synchronized (this) {
@@ -1524,7 +1529,7 @@ public class MiniDFSCluster {
failedCount++;
// Cached RPC connection to namenode, if any, is expected to fail once
if (failedCount > 1) {
- System.out.println("Tried waitActive() " + failedCount
+ LOG.warn("Tried waitActive() " + failedCount
+ " time(s) and failed, giving up. "
+ StringUtils.stringifyException(e));
throw e;
@@ -1576,7 +1581,7 @@ public class MiniDFSCluster {
}
public void formatDataNodeDirs() throws IOException {
- base_dir = new File(getBaseDirectory());
+ base_dir = new File(determineDfsBaseDir());
data_dir = new File(base_dir, "data");
if (data_dir.exists() && !FileUtil.fullyDelete(data_dir)) {
throw new IOException("Cannot remove data directory: " + data_dir);
@@ -1697,8 +1702,49 @@ public class MiniDFSCluster {
return data_dir.getAbsolutePath();
}
+ /**
+ * Get the base directory for this MiniDFS instance.
+ *
+ * Within the MiniDFSCluster class and any subclasses, this method should be
+ * used instead of {@link #getBaseDirectory()} which doesn't support
+ * configuration-specific base directories.
+ *
+ * First the Configuration property {@link #HDFS_MINIDFS_BASEDIR} is fetched.
+ * If non-null, this is returned.
+ * If this is null, then {@link #getBaseDirectory()} is called.
+ * @return the base directory for this instance.
+ */
+ protected String determineDfsBaseDir() {
+ String dfsdir = conf.get(HDFS_MINIDFS_BASEDIR, null);
+ if (dfsdir == null) {
+ dfsdir = getBaseDirectory();
+ }
+ return dfsdir;
+ }
+
+ /**
+ * Get the base directory for any DFS cluster whose configuration does
+ * not explicitly set it. This is done by retrieving the system property
+ * {@link #PROP_TEST_BUILD_DATA} (defaulting to "build/test/data" ),
+ * and returning that directory with a subdir of /dfs.
+ * @return a directory for use as a miniDFS filesystem.
+ */
public static String getBaseDirectory() {
- return System.getProperty("test.build.data", "build/test/data") + "/dfs/";
+ return System.getProperty(PROP_TEST_BUILD_DATA, "build/test/data") + "/dfs/";
+ }
+
+ /**
+ * Get a storage directory for a datanode in this specific instance of
+ * a MiniCluster.
+ *
+ * @param dnIndex datanode index (starts from 0)
+ * @param dirIndex directory index (0 or 1). Index 0 provides access to the
+ * first storage directory. Index 1 provides access to the second
+ * storage directory.
+ * @return Storage directory
+ */
+ public File getInstanceStorageDir(int dnIndex, int dirIndex) {
+ return new File(base_dir, getStorageDirPath(dnIndex, dirIndex));
}
/**
@@ -1716,13 +1762,25 @@ public class MiniDFSCluster {
* @return Storage directory
*/
public static File getStorageDir(int dnIndex, int dirIndex) {
- return new File(getBaseDirectory() + "data/data" + (2*dnIndex + 1 + dirIndex));
+ return new File(getBaseDirectory(), getStorageDirPath(dnIndex, dirIndex));
}
-
+
/**
- * Get current directory corresponding to the datanode
- * @param storageDir
- * @return current directory
+ * Calculate the DN instance-specific path that is appended to the base dir
+ * to determine the storage location of a DN instance in the mini cluster.
+ * @param dnIndex datanode index
+ * @param dirIndex directory index (0 or 1).
+ * @return the storage directory path, relative to the base dir
+ */
+ private static String getStorageDirPath(int dnIndex, int dirIndex) {
+ return "data/data" + (2 * dnIndex + 1 + dirIndex);
+ }
+
+ /**
+ * Get the current directory corresponding to the datanode, as defined in
+ * {@link Storage#STORAGE_DIR_CURRENT}.
+ * @param storageDir the storage directory of a datanode.
+ * @return the datanode current directory
*/
public static String getDNCurrentDir(File storageDir) {
return storageDir + "/" + Storage.STORAGE_DIR_CURRENT + "/";
@@ -1730,8 +1788,8 @@ public class MiniDFSCluster {
/**
* Get directory corresponding to block pool directory in the datanode
- * @param storageDir
- * @return current directory
+ * @param storageDir the storage directory of a datanode.
+ * @return the block pool directory
*/
public static String getBPDir(File storageDir, String bpid) {
return getDNCurrentDir(storageDir) + bpid + "/";
@@ -1777,6 +1835,16 @@ public class MiniDFSCluster {
return new File(getFinalizedDir(storageDir, blk.getBlockPoolId()),
blk.getBlockName());
}
+
+ /**
+ * Shut down a cluster if it is not null
+ * @param cluster cluster reference or null
+ */
+ public static void shutdownCluster(MiniDFSCluster cluster) {
+ if (cluster != null) {
+ cluster.shutdown();
+ }
+ }
/**
* Get all files related to a block from all the datanodes
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
index e5846abe797..3be88d3e5cd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestByteRangeInputStream.java
@@ -35,28 +35,29 @@ import org.apache.hadoop.hdfs.ByteRangeInputStream.URLOpener;
import org.junit.Test;
class MockHttpURLConnection extends HttpURLConnection {
- private int responseCode = -1;
- URL m;
-
public MockHttpURLConnection(URL u) {
super(u);
- m = u;
}
+ @Override
public boolean usingProxy(){
return false;
}
+ @Override
public void disconnect() {
}
- public void connect() throws IOException {
+ @Override
+ public void connect() {
}
+ @Override
public InputStream getInputStream() throws IOException {
return new ByteArrayInputStream("asdf".getBytes());
}
+ @Override
public URL getURL() {
URL u = null;
try {
@@ -67,6 +68,7 @@ class MockHttpURLConnection extends HttpURLConnection {
return u;
}
+ @Override
public int getResponseCode() {
if (responseCode != -1) {
return responseCode;
@@ -82,10 +84,45 @@ class MockHttpURLConnection extends HttpURLConnection {
public void setResponseCode(int resCode) {
responseCode = resCode;
}
-
}
public class TestByteRangeInputStream {
+ @Test
+ public void testRemoveOffset() throws IOException {
+ { //no offset
+ String s = "http://test/Abc?Length=99";
+ assertEquals(s, ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+ }
+
+ { //no parameters
+ String s = "http://test/Abc";
+ assertEquals(s, ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+ }
+
+ { //offset as first parameter
+ String s = "http://test/Abc?offset=10&Length=99";
+ assertEquals("http://test/Abc?Length=99",
+ ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+ }
+
+ { //offset as second parameter
+ String s = "http://test/Abc?op=read&OFFset=10&Length=99";
+ assertEquals("http://test/Abc?op=read&Length=99",
+ ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+ }
+
+ { //offset as last parameter
+ String s = "http://test/Abc?Length=99&offset=10";
+ assertEquals("http://test/Abc?Length=99",
+ ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+ }
+
+ { //offset as the only parameter
+ String s = "http://test/Abc?offset=10";
+ assertEquals("http://test/Abc",
+ ByteRangeInputStream.removeOffsetParam(new URL(s)).toString());
+ }
+ }
@Test
public void testByteRange() throws IOException {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
index 9944f1f2c8d..482f12b00dd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestCrcCorruption.java
@@ -83,7 +83,7 @@ public class TestCrcCorruption {
// file disallows this Datanode to send data to another datanode.
// However, a client is alowed access to this block.
//
- File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+ File storageDir = cluster.getInstanceStorageDir(0, 1);
String bpid = cluster.getNamesystem().getBlockPoolId();
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
@@ -142,7 +142,7 @@ public class TestCrcCorruption {
// Now deliberately corrupt all meta blocks from the second
// directory of the first datanode
//
- storageDir = MiniDFSCluster.getStorageDir(0, 1);
+ storageDir = cluster.getInstanceStorageDir(0, 1);
data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
blocks = data_dir.listFiles();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
index 95bf47f97c3..a8f814b6526 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSRollback.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
import org.apache.hadoop.util.StringUtils;
+import com.google.common.base.Charsets;
import com.google.common.collect.Lists;
/**
@@ -263,10 +264,14 @@ public class TestDFSRollback extends TestCase {
UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "previous");
for (File f : baseDirs) {
- UpgradeUtilities.corruptFile(new File(f,"VERSION"));
+ UpgradeUtilities.corruptFile(
+ new File(f,"VERSION"),
+ "layoutVersion".getBytes(Charsets.UTF_8),
+ "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail(StartupOption.ROLLBACK,
"file VERSION has layoutVersion missing");
+
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
log("NameNode rollback with old layout version in previous", numDirs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
index 251f23dee70..a308c230cb0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSUpgrade.java
@@ -39,6 +39,7 @@ import org.junit.BeforeClass;
import org.junit.Ignore;
import org.junit.Test;
+import com.google.common.base.Charsets;
import com.google.common.base.Joiner;
import static org.junit.Assert.*;
@@ -303,7 +304,10 @@ public class TestDFSUpgrade {
log("NameNode upgrade with corrupt version file", numDirs);
baseDirs = UpgradeUtilities.createNameNodeStorageDirs(nameNodeDirs, "current");
for (File f : baseDirs) {
- UpgradeUtilities.corruptFile(new File (f,"VERSION"));
+ UpgradeUtilities.corruptFile(
+ new File(f,"VERSION"),
+ "layoutVersion".getBytes(Charsets.UTF_8),
+ "xxxxxxxxxxxxx".getBytes(Charsets.UTF_8));
}
startNameNodeShouldFail(StartupOption.UPGRADE);
UpgradeUtilities.createEmptyDirs(nameNodeDirs);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
index 72faa319b7a..d02ae1da353 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
@@ -31,6 +31,7 @@ import java.util.Random;
import junit.framework.TestCase;
+import org.apache.commons.digester.SetRootRule;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.HdfsConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil;
import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
import org.apache.hadoop.hdfs.protocol.datatransfer.Op;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketHeader;
@@ -50,6 +52,7 @@ import org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck;
import org.apache.hadoop.hdfs.protocol.datatransfer.Sender;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto.Builder;
+import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.ReadOpChecksumInfoProto;
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants;
@@ -59,6 +62,7 @@ import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.util.DataChecksum;
import org.apache.hadoop.util.StringUtils;
import org.junit.Test;
+import org.mockito.Mockito;
/**
* This tests data transfer protocol handling in the Datanode. It sends
@@ -68,6 +72,9 @@ public class TestDataTransferProtocol extends TestCase {
private static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.hdfs.TestDataTransferProtocol");
+
+ private static final DataChecksum DEFAULT_CHECKSUM =
+ DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512);
DatanodeID datanode;
InetSocketAddress dnAddr;
@@ -117,10 +124,8 @@ public class TestDataTransferProtocol extends TestCase {
throw eof;
}
- LOG.info("Received: " +
- StringUtils.byteToHexString(retBuf));
- LOG.info("Expected: " +
- StringUtils.byteToHexString(recvBuf.toByteArray()));
+ LOG.info("Received: " +new String(retBuf));
+ LOG.info("Expected: " + StringUtils.byteToHexString(recvBuf.toByteArray()));
if (eofExpected) {
throw new IOException("Did not recieve IOException when an exception " +
@@ -129,10 +134,8 @@ public class TestDataTransferProtocol extends TestCase {
}
byte[] needed = recvBuf.toByteArray();
- for (int i=0; i token = new Token
+ (new byte[0], new byte[0],
+ DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
+ new Text("127.0.0.1:8020"));
+ user.addToken(token);
+ Token<?> token2 = new Token<TokenIdentifier>
+ (null, null, new Text("other token"), new Text("127.0.0.1:8020"));
+ user.addToken(token2);
+ assertEquals("wrong tokens in user", 2, user.getTokens().size());
+ FileSystem fs =
+ user.doAs(new PrivilegedExceptionAction<FileSystem>() {
+ public FileSystem run() throws Exception {
+ return FileSystem.get(new URI("hftp://localhost:50470/"), conf);
+ }
+ });
+ assertSame("wrong kind of file system", HftpFileSystem.class,
+ fs.getClass());
+ Field renewToken = HftpFileSystem.class.getDeclaredField("renewToken");
+ renewToken.setAccessible(true);
+ assertSame("wrong token", token, renewToken.get(fs));
+ }
+}
\ No newline at end of file
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
new file mode 100644
index 00000000000..5948178e79c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMiniDFSCluster.java
@@ -0,0 +1,108 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs;
+
+import junit.framework.Assert;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+import java.io.File;
+
+/**
+ * Tests MiniDFS cluster setup/teardown and isolation.
+ * Every instance is brought up with a new data dir, to ensure that
+ * shutdown work in background threads don't interfere with bringing up
+ * the new cluster.
+ */
+public class TestMiniDFSCluster {
+
+ private static final String CLUSTER_1 = "cluster1";
+ private static final String CLUSTER_2 = "cluster2";
+ private static final String CLUSTER_3 = "cluster3";
+ protected String testDataPath;
+ protected File testDataDir;
+ @Before
+ public void setUp() {
+ testDataPath = System.getProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
+ testDataDir = new File(new File(testDataPath).getParentFile(),
+ "miniclusters");
+
+
+ }
+ @After
+ public void tearDown() {
+ System.setProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA, testDataPath);
+ }
+
+ /**
+ * Verify that without system properties the cluster still comes up, provided
+ * the configuration is set
+ *
+ * @throws Throwable on a failure
+ */
+ @Test
+ public void testClusterWithoutSystemProperties() throws Throwable {
+ System.clearProperty(MiniDFSCluster.PROP_TEST_BUILD_DATA);
+ Configuration conf = new HdfsConfiguration();
+ File testDataCluster1 = new File(testDataPath, CLUSTER_1);
+ String c1Path = testDataCluster1.getAbsolutePath();
+ conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c1Path);
+ MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
+ try {
+ Assert.assertEquals(c1Path+"/data", cluster.getDataDirectory());
+ } finally {
+ cluster.shutdown();
+ }
+ }
+
+ /**
+ * Bring up two clusters and assert that they are in different directories.
+ * @throws Throwable on a failure
+ */
+ @Test
+ public void testDualClusters() throws Throwable {
+ File testDataCluster2 = new File(testDataPath, CLUSTER_2);
+ File testDataCluster3 = new File(testDataPath, CLUSTER_3);
+ Configuration conf = new HdfsConfiguration();
+ String c2Path = testDataCluster2.getAbsolutePath();
+ conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR, c2Path);
+ MiniDFSCluster cluster2 = new MiniDFSCluster.Builder(conf).build();
+ MiniDFSCluster cluster3 = null;
+ try {
+ String dataDir2 = cluster2.getDataDirectory();
+ Assert.assertEquals(c2Path + "/data", dataDir2);
+ //change the data dir
+ conf.set(MiniDFSCluster.HDFS_MINIDFS_BASEDIR,
+ testDataCluster3.getAbsolutePath());
+ MiniDFSCluster.Builder builder = new MiniDFSCluster.Builder(conf);
+ cluster3 = builder.build();
+ String dataDir3 = cluster3.getDataDirectory();
+ Assert.assertTrue("Clusters are bound to the same directory: " + dataDir2,
+ !dataDir2.equals(dataDir3));
+ } finally {
+ MiniDFSCluster.shutdownCluster(cluster3);
+ MiniDFSCluster.shutdownCluster(cluster2);
+ }
+ }
+
+
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
index b230391dd02..707a2b1fb7c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestSetTimes.java
@@ -158,6 +158,24 @@ public class TestSetTimes extends TestCase {
assertTrue(atime2 == stat.getAccessTime());
assertTrue(mtime2 == mtime3);
+ long mtime4 = System.currentTimeMillis() - (3600L * 1000L);
+ long atime4 = System.currentTimeMillis();
+ fileSys.setTimes(dir1, mtime4, atime4);
+ // check new modification time on file
+ stat = fileSys.getFileStatus(dir1);
+ assertTrue("Not matching the modification times", mtime4 == stat
+ .getModificationTime());
+ assertTrue("Not matching the access times", atime4 == stat
+ .getAccessTime());
+
+ Path nonExistingDir = new Path(dir1, "/nonExistingDir/");
+ try {
+ fileSys.setTimes(nonExistingDir, mtime4, atime4);
+ fail("Expecting FileNotFoundException");
+ } catch (FileNotFoundException e) {
+ assertTrue(e.getMessage().contains(
+ "File/Directory " + nonExistingDir.toString() + " does not exist."));
+ }
// shutdown cluster and restart
cluster.shutdown();
try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
index 337fa8a17c0..0b6bceafafc 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/UpgradeUtilities.java
@@ -24,10 +24,8 @@ import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;
-import java.io.RandomAccessFile;
import java.net.URI;
import java.util.Arrays;
-import java.util.Random;
import java.util.Collections;
import java.util.zip.CRC32;
import org.apache.hadoop.conf.Configuration;
@@ -53,6 +51,10 @@ import org.apache.hadoop.hdfs.server.datanode.DataStorage;
import org.apache.hadoop.hdfs.server.namenode.NNStorage;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import com.google.common.base.Preconditions;
+import com.google.common.io.Files;
+import com.google.common.primitives.Bytes;
+
/**
* This class defines a number of static helper methods used by the
* DFS Upgrade unit tests. By default, a singleton master populated storage
@@ -483,20 +485,26 @@ public class UpgradeUtilities {
* @throws IllegalArgumentException if the given file is not a file
* @throws IOException if an IOException occurs while reading or writing the file
*/
- public static void corruptFile(File file) throws IOException {
+ public static void corruptFile(File file,
+ byte[] stringToCorrupt,
+ byte[] replacement) throws IOException {
+ Preconditions.checkArgument(replacement.length == stringToCorrupt.length);
if (!file.isFile()) {
throw new IllegalArgumentException(
- "Given argument is not a file:" + file);
+ "Given argument is not a file:" + file);
}
- RandomAccessFile raf = new RandomAccessFile(file,"rws");
- Random random = new Random();
- for (long i = 0; i < raf.length(); i++) {
- raf.seek(i);
- if (random.nextBoolean()) {
- raf.writeByte(random.nextInt());
- }
+ byte[] data = Files.toByteArray(file);
+ int index = Bytes.indexOf(data, stringToCorrupt);
+ if (index == -1) {
+ throw new IOException(
+ "File " + file + " does not contain string " +
+ new String(stringToCorrupt));
}
- raf.close();
+
+ for (int i = 0; i < stringToCorrupt.length; i++) {
+ data[index + i] = replacement[i];
+ }
+ Files.write(data, file);
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
index 9c577f740ee..7808e09b900 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/TestDelegationToken.java
@@ -148,7 +148,7 @@ public class TestDelegationToken {
@Test
public void testDelegationTokenDFSApi() throws Exception {
DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
- Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
+ final Token<DelegationTokenIdentifier> token = dfs.getDelegationToken("JobTracker");
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
byte[] tokenId = token.getIdentifier();
identifier.readFields(new DataInputStream(
@@ -156,6 +156,15 @@ public class TestDelegationToken {
LOG.info("A valid token should have non-null password, and should be renewed successfully");
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
dtSecretManager.renewToken(token, "JobTracker");
+ UserGroupInformation.createRemoteUser("JobTracker").doAs(
+ new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ token.renew(config);
+ token.cancel(config);
+ return null;
+ }
+ });
}
@Test
@@ -174,15 +183,26 @@ public class TestDelegationToken {
}
});
- final Token<DelegationTokenIdentifier> token = webhdfs.getDelegationToken("JobTracker");
+ final Token<DelegationTokenIdentifier> token = webhdfs
+ .getDelegationToken("JobTracker");
DelegationTokenIdentifier identifier = new DelegationTokenIdentifier();
byte[] tokenId = token.getIdentifier();
- identifier.readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
+ identifier
+ .readFields(new DataInputStream(new ByteArrayInputStream(tokenId)));
LOG.info("A valid token should have non-null password, and should be renewed successfully");
Assert.assertTrue(null != dtSecretManager.retrievePassword(identifier));
dtSecretManager.renewToken(token, "JobTracker");
+ ugi.doAs(new PrivilegedExceptionAction<Object>() {
+ @Override
+ public Object run() throws Exception {
+ token.renew(config);
+ token.cancel(config);
+ return null;
+ }
+ });
}
+ @SuppressWarnings("deprecation")
@Test
public void testDelegationTokenWithDoAs() throws Exception {
final DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
index 9ad87fe0875..fd9c91d88c4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
@@ -51,12 +51,12 @@ import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
import org.apache.hadoop.io.TestWritable;
import org.apache.hadoop.ipc.Client;
import org.apache.hadoop.ipc.ProtocolSignature;
@@ -96,9 +96,9 @@ public class TestBlockToken {
((Log4JLogger) SaslRpcServer.LOG).getLogger().setLevel(Level.ALL);
((Log4JLogger) SaslInputStream.LOG).getLogger().setLevel(Level.ALL);
}
-
+
/** Directory where we can count our open file descriptors under Linux */
- static File FD_DIR = new File("/proc/self/fd/");
+ static File FD_DIR = new File("/proc/self/fd/");
long blockKeyUpdateInterval = 10 * 60 * 1000; // 10 mins
long blockTokenLifetime = 2 * 60 * 1000; // 2 mins
@@ -120,7 +120,8 @@ public class TestBlockToken {
public Long answer(InvocationOnMock invocation) throws IOException {
Object args[] = invocation.getArguments();
assertEquals(1, args.length);
- ExtendedBlock block = (ExtendedBlock) args[0];
+ org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable block =
+ (org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable) args[0];
Set<TokenIdentifier> tokenIds = UserGroupInformation.getCurrentUser()
.getTokenIdentifiers();
assertEquals("Only one BlockTokenIdentifier expected", 1, tokenIds.size());
@@ -129,7 +130,9 @@ public class TestBlockToken {
BlockTokenIdentifier id = (BlockTokenIdentifier) tokenId;
LOG.info("Got: " + id.toString());
assertTrue("Received BlockTokenIdentifier is wrong", ident.equals(id));
- sm.checkAccess(id, null, block, BlockTokenSecretManager.AccessMode.WRITE);
+ sm.checkAccess(id, null, org.apache.hadoop.hdfs.protocolR23Compatible.
+ ExtendedBlockWritable.convertExtendedBlock(block),
+ BlockTokenSecretManager.AccessMode.WRITE);
result = id.getBlockId();
}
return result;
@@ -137,7 +140,8 @@ public class TestBlockToken {
}
private BlockTokenIdentifier generateTokenId(BlockTokenSecretManager sm,
- ExtendedBlock block, EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
+ ExtendedBlock block,
+ EnumSet<BlockTokenSecretManager.AccessMode> accessModes)
throws IOException {
Token<BlockTokenIdentifier> token = sm.generateToken(block, accessModes);
BlockTokenIdentifier id = sm.createIdentifier();
@@ -151,12 +155,12 @@ public class TestBlockToken {
TestWritable.testWritable(new BlockTokenIdentifier());
BlockTokenSecretManager sm = new BlockTokenSecretManager(true,
blockKeyUpdateInterval, blockTokenLifetime);
- TestWritable.testWritable(generateTokenId(sm, block1, EnumSet
- .allOf(BlockTokenSecretManager.AccessMode.class)));
- TestWritable.testWritable(generateTokenId(sm, block2, EnumSet
- .of(BlockTokenSecretManager.AccessMode.WRITE)));
- TestWritable.testWritable(generateTokenId(sm, block3, EnumSet
- .noneOf(BlockTokenSecretManager.AccessMode.class)));
+ TestWritable.testWritable(generateTokenId(sm, block1,
+ EnumSet.allOf(BlockTokenSecretManager.AccessMode.class)));
+ TestWritable.testWritable(generateTokenId(sm, block2,
+ EnumSet.of(BlockTokenSecretManager.AccessMode.WRITE)));
+ TestWritable.testWritable(generateTokenId(sm, block3,
+ EnumSet.noneOf(BlockTokenSecretManager.AccessMode.class)));
}
private void tokenGenerationAndVerification(BlockTokenSecretManager master,
@@ -176,8 +180,8 @@ public class TestBlockToken {
slave.checkAccess(token2, null, block2, mode);
}
// multi-mode tokens
- Token<BlockTokenIdentifier> mtoken = master.generateToken(block3, EnumSet
- .allOf(BlockTokenSecretManager.AccessMode.class));
+ Token<BlockTokenIdentifier> mtoken = master.generateToken(block3,
+ EnumSet.allOf(BlockTokenSecretManager.AccessMode.class));
for (BlockTokenSecretManager.AccessMode mode : BlockTokenSecretManager.AccessMode
.values()) {
master.checkAccess(mtoken, null, block3, mode);
@@ -202,25 +206,28 @@ public class TestBlockToken {
slaveHandler.setKeys(keys);
tokenGenerationAndVerification(masterHandler, slaveHandler);
}
-
+
private Server createMockDatanode(BlockTokenSecretManager sm,
Token<BlockTokenIdentifier> token) throws IOException {
- ClientDatanodeProtocol mockDN = mock(ClientDatanodeProtocol.class);
+ org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol mockDN =
+ mock(org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class);
when(mockDN.getProtocolVersion(anyString(), anyLong())).thenReturn(
- ClientDatanodeProtocol.versionID);
- doReturn(ProtocolSignature.getProtocolSignature(
- mockDN, ClientDatanodeProtocol.class.getName(),
- ClientDatanodeProtocol.versionID, 0))
- .when(mockDN).getProtocolSignature(anyString(), anyLong(), anyInt());
+ org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.versionID);
+ doReturn(
+ ProtocolSignature.getProtocolSignature(mockDN,
+ org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class.getName(),
+ org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.versionID, 0)).when(mockDN)
+ .getProtocolSignature(anyString(), anyLong(), anyInt());
BlockTokenIdentifier id = sm.createIdentifier();
id.readFields(new DataInputStream(new ByteArrayInputStream(token
.getIdentifier())));
doAnswer(new getLengthAnswer(sm, id)).when(mockDN).getReplicaVisibleLength(
- any(ExtendedBlock.class));
+ any(org.apache.hadoop.hdfs.protocolR23Compatible.ExtendedBlockWritable.class));
- return RPC.getServer(ClientDatanodeProtocol.class, mockDN,
- ADDRESS, 0, 5, true, conf, sm);
+ return RPC.getServer(org.apache.hadoop.hdfs.protocolR23Compatible.ClientDatanodeWireProtocol.class,
+ mockDN, ADDRESS, 0, 5,
+ true, conf, sm);
}
@Test
@@ -241,9 +248,8 @@ public class TestBlockToken {
ClientDatanodeProtocol proxy = null;
try {
- proxy = RPC.getProxy(
- ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID, addr,
- ticket, conf, NetUtils.getDefaultSocketFactory(conf));
+ proxy = DFSUtil.createClientDatanodeProtocolProxy(addr, ticket, conf,
+ NetUtils.getDefaultSocketFactory(conf));
assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
} finally {
server.stop();
@@ -255,8 +261,8 @@ public class TestBlockToken {
/**
* Test that fast repeated invocations of createClientDatanodeProtocolProxy
- * will not end up using up thousands of sockets. This is a regression test for
- * HDFS-1965.
+ * will not end up using up thousands of sockets. This is a regression test
+ * for HDFS-1965.
*/
@Test
public void testBlockTokenRpcLeak() throws Exception {
@@ -270,9 +276,9 @@ public class TestBlockToken {
server.start();
final InetSocketAddress addr = NetUtils.getConnectAddress(server);
- DatanodeID fakeDnId = new DatanodeID(
- "localhost:" + addr.getPort(), "fake-storage", 0, addr.getPort());
-
+ DatanodeID fakeDnId = new DatanodeID("localhost:" + addr.getPort(),
+ "fake-storage", 0, addr.getPort());
+
ExtendedBlock b = new ExtendedBlock("fake-pool", new Block(12345L));
LocatedBlock fakeBlock = new LocatedBlock(b, new DatanodeInfo[0]);
fakeBlock.setBlockToken(token);
@@ -282,19 +288,19 @@ public class TestBlockToken {
// RPC "Client" object to stay above 0 such that RPC.stopProxy doesn't
// actually close the TCP connections to the real target DN.
ClientDatanodeProtocol proxyToNoWhere = RPC.getProxy(
- ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
+ ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
new InetSocketAddress("1.1.1.1", 1),
- UserGroupInformation.createRemoteUser("junk"),
- conf, NetUtils.getDefaultSocketFactory(conf));
-
+ UserGroupInformation.createRemoteUser("junk"), conf,
+ NetUtils.getDefaultSocketFactory(conf));
+
ClientDatanodeProtocol proxy = null;
int fdsAtStart = countOpenFileDescriptors();
try {
long endTime = System.currentTimeMillis() + 3000;
while (System.currentTimeMillis() < endTime) {
- proxy = DFSUtil.createClientDatanodeProtocolProxy(
- fakeDnId, conf, 1000, fakeBlock);
+ proxy = DFSUtil.createClientDatanodeProtocolProxy(fakeDnId, conf, 1000,
+ fakeBlock);
assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
if (proxy != null) {
RPC.stopProxy(proxy);
@@ -303,32 +309,31 @@ public class TestBlockToken {
}
int fdsAtEnd = countOpenFileDescriptors();
-
+
if (fdsAtEnd - fdsAtStart > 50) {
fail("Leaked " + (fdsAtEnd - fdsAtStart) + " fds!");
}
} finally {
server.stop();
}
-
+
RPC.stopProxy(proxyToNoWhere);
}
/**
- * @return the current number of file descriptors open by this
- * process.
+ * @return the current number of file descriptors open by this process.
*/
private static int countOpenFileDescriptors() throws IOException {
return FD_DIR.list().length;
}
- /**
+ /**
* Test {@link BlockPoolTokenSecretManager}
*/
@Test
public void testBlockPoolTokenSecretManager() throws Exception {
BlockPoolTokenSecretManager bpMgr = new BlockPoolTokenSecretManager();
-
+
// Test BlockPoolSecretManager with upto 10 block pools
for (int i = 0; i < 10; i++) {
String bpid = Integer.toString(i);
@@ -337,12 +342,11 @@ public class TestBlockToken {
BlockTokenSecretManager slaveHandler = new BlockTokenSecretManager(false,
blockKeyUpdateInterval, blockTokenLifetime);
bpMgr.addBlockPool(bpid, slaveHandler);
-
-
+
ExportedBlockKeys keys = masterHandler.exportKeys();
bpMgr.setKeys(bpid, keys);
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
-
+
// Test key updating
masterHandler.updateKeys();
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
@@ -351,11 +355,12 @@ public class TestBlockToken {
tokenGenerationAndVerification(masterHandler, bpMgr.get(bpid));
}
}
-
+
/**
- * This test writes a file and gets the block locations without closing
- * the file, and tests the block token in the last block. Block token is
- * verified by ensuring it is of correct kind.
+ * This test writes a file and gets the block locations without closing the
+ * file, and tests the block token in the last block. Block token is verified
+ * by ensuring it is of the correct kind.
+ *
* @throws IOException
* @throws InterruptedException
*/
@@ -389,5 +394,5 @@ public class TestBlockToken {
} finally {
cluster.shutdown();
}
- }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
index 565a765b1f1..44d733df5ae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestBlockManager.java
@@ -80,7 +80,7 @@ public class TestBlockManager {
"need to set a dummy value here so it assumes a multi-rack cluster");
fsn = Mockito.mock(FSNamesystem.class);
Mockito.doReturn(true).when(fsn).hasWriteLock();
- bm = new BlockManager(fsn, conf);
+ bm = new BlockManager(fsn, fsn, conf);
}
private void addNodes(Iterable<DatanodeDescriptor> nodesToAdd) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
index 102b41ca8e0..f7a5c0e065e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestOverReplicatedBlocks.java
@@ -63,7 +63,7 @@ public class TestOverReplicatedBlocks extends TestCase {
DataNodeProperties dnProps = cluster.stopDataNode(0);
// remove block scanner log to trigger block scanning
File scanLog = new File(MiniDFSCluster.getFinalizedDir(
- MiniDFSCluster.getStorageDir(0, 0),
+ cluster.getInstanceStorageDir(0, 0),
cluster.getNamesystem().getBlockPoolId()).getParent().toString()
+ "/../dncp_block_verification.log.prev");
//wait for one minute for deletion to succeed;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
new file mode 100644
index 00000000000..20c2541119c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlockQueues.java
@@ -0,0 +1,103 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.blockmanagement;
+
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestUnderReplicatedBlockQueues extends Assert {
+
+ /**
+ * Test that adding blocks with different replication counts puts them
+ * into different queues
+ * @throws Throwable if something goes wrong
+ */
+ @Test
+ public void testBlockPriorities() throws Throwable {
+ UnderReplicatedBlocks queues = new UnderReplicatedBlocks();
+ Block block1 = new Block(1);
+ Block block2 = new Block(2);
+ Block block_very_under_replicated = new Block(3);
+ Block block_corrupt = new Block(4);
+
+ //add a block with a single entry
+ assertAdded(queues, block1, 1, 0, 3);
+
+ assertEquals(1, queues.getUnderReplicatedBlockCount());
+ assertEquals(1, queues.size());
+ assertInLevel(queues, block1, UnderReplicatedBlocks.QUEUE_HIGHEST_PRIORITY);
+ //repeated additions fail
+ assertFalse(queues.add(block1, 1, 0, 3));
+
+ //add a second block with two replicas
+ assertAdded(queues, block2, 2, 0, 3);
+ assertEquals(2, queues.getUnderReplicatedBlockCount());
+ assertEquals(2, queues.size());
+ assertInLevel(queues, block2, UnderReplicatedBlocks.QUEUE_UNDER_REPLICATED);
+ //now try to add a block that is corrupt
+ assertAdded(queues, block_corrupt, 0, 0, 3);
+ assertEquals(3, queues.size());
+ assertEquals(2, queues.getUnderReplicatedBlockCount());
+ assertEquals(1, queues.getCorruptBlockSize());
+ assertInLevel(queues, block_corrupt,
+ UnderReplicatedBlocks.QUEUE_WITH_CORRUPT_BLOCKS);
+
+ //insert a very under-replicated block
+ assertAdded(queues, block_very_under_replicated, 4, 0, 25);
+ assertInLevel(queues, block_very_under_replicated,
+ UnderReplicatedBlocks.QUEUE_VERY_UNDER_REPLICATED);
+
+ }
+
+ private void assertAdded(UnderReplicatedBlocks queues,
+ Block block,
+ int curReplicas,
+ int decomissionedReplicas,
+ int expectedReplicas) {
+ assertTrue("Failed to add " + block,
+ queues.add(block,
+ curReplicas,
+ decomissionedReplicas,
+ expectedReplicas));
+ }
+
+ /**
+ * Determine whether or not a block is in a level without changing the API.
+ * Instead get the per-level iterator and run through it looking for a match.
+ * If the block is not found, the assertion fails.
+ *
+ * This is inefficient, but this is only a test case.
+ * @param queues queues to scan
+ * @param block block to look for
+ * @param level level to select
+ */
+ private void assertInLevel(UnderReplicatedBlocks queues,
+ Block block,
+ int level) {
+ UnderReplicatedBlocks.BlockIterator bi = queues.iterator(level);
+ while (bi.hasNext()) {
+ Block next = bi.next();
+ if (block.equals(next)) {
+ return;
+ }
+ }
+ fail("Block " + block + " not found in level " + level);
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
index a541bcb5d2d..89e48fb586f 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
@@ -324,7 +324,7 @@ public class TestDataNodeVolumeFailure {
final String bpid = cluster.getNamesystem().getBlockPoolId();
for(int i=0; i
+ Set<Map.Entry<Object, Object>> prevProps = null;
+
+ for (File f : propFiles) {
+ Properties props;
+ FileInputStream is = new FileInputStream(f);
+ try {
+ props = new Properties();
+ props.load(is);
+ } finally {
+ IOUtils.closeStream(is);
+ }
+ if (prevProps == null) {
+ prevProps = props.entrySet();
+ } else {
+ Set<Map.Entry<Object, Object>> diff =
+ Sets.symmetricDifference(prevProps, props.entrySet());
+ if (!diff.isEmpty()) {
+ fail("Properties file " + f + " differs from " + propFiles[0]);
+ }
+ }
+ }
+ }
+
/**
* Assert that all of the given paths have the exact same
* contents
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
index 76f0b9408a7..acbd7d4ee09 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NameNodeAdapter.java
@@ -52,7 +52,7 @@ public class NameNodeAdapter {
* @return rpc server
*/
public static Server getRpcServer(NameNode namenode) {
- return ((NameNodeRpcServer)namenode.getRpcServer()).server;
+ return ((NameNodeRpcServer)namenode.getRpcServer()).clientRpcServer;
}
public static DelegationTokenSecretManager getDtSecretManager(
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
index 3fca8a38087..e22fa29927a 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
@@ -203,11 +203,9 @@ public class OfflineEditsViewerHelper {
"JobTracker/foo.com@FOO.COM");
try {
longUgi.doAs(new PrivilegedExceptionAction<Object>() {
- public Object run() throws IOException {
- final DistributedFileSystem dfs =
- (DistributedFileSystem) cluster.getFileSystem();
- dfs.renewDelegationToken(token);
- dfs.cancelDelegationToken(token);
+ public Object run() throws IOException, InterruptedException {
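+ // renew/cancel now dispatch through the token's kind-specific TokenRenewer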
+ token.renew(config);
+ token.cancel(config);
return null;
}
});
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
index 2a27c37fc97..d392718ae22 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBackupNode.java
@@ -19,9 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
+import java.net.InetSocketAddress;
import java.util.Collections;
import java.util.List;
+import junit.framework.TestCase;
+
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.logging.impl.Log4JLogger;
@@ -29,13 +32,13 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.log4j.Level;
@@ -44,8 +47,6 @@ import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;
-import junit.framework.TestCase;
-
public class TestBackupNode extends TestCase {
public static final Log LOG = LogFactory.getLog(TestBackupNode.class);
@@ -241,8 +242,11 @@ public class TestBackupNode extends TestCase {
void testCheckpoint(StartupOption op) throws Exception {
Path file1 = new Path("checkpoint.dat");
Path file2 = new Path("checkpoint2.dat");
+ Path file3 = new Path("backup.dat");
Configuration conf = new HdfsConfiguration();
+ short replication = (short)conf.getInt("dfs.replication", 3);
+ int numDatanodes = Math.max(3, replication);
conf.set(DFSConfigKeys.DFS_BLOCKREPORT_INITIAL_DELAY_KEY, "0");
conf.setInt(DFSConfigKeys.DFS_DATANODE_SCAN_PERIOD_HOURS_KEY, -1); // disable block scanner
conf.setInt(DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_TXNS_KEY, 1);
@@ -290,7 +294,7 @@ public class TestBackupNode extends TestCase {
//
// Restart cluster and verify that file1 still exist.
//
- cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+ cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
.format(false).build();
fileSys = cluster.getFileSystem();
// check that file1 still exists
@@ -319,6 +323,26 @@ public class TestBackupNode extends TestCase {
backup.doCheckpoint();
waitCheckpointDone(cluster, backup, txid);
+ // Try BackupNode operations
+ InetSocketAddress add = backup.getNameNodeAddress();
+ // Write to BN
+ FileSystem bnFS = FileSystem.get(new Path("hdfs://"
+ + NameNode.getHostPortString(add)).toUri(), conf);
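+ // the BackupNode namespace is read-only, so any write through it must fail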
+ boolean canWrite = true;
+ try {
+ TestCheckpoint.writeFile(bnFS, file3, replication);
+ } catch (IOException eio) {
+ LOG.info("Write to BN failed as expected: ", eio);
+ canWrite = false;
+ }
+ assertFalse("Write to BackupNode must be prohibited.", canWrite);
+
+ TestCheckpoint.writeFile(fileSys, file3, replication);
+ TestCheckpoint.checkFile(fileSys, file3, replication);
+ // should also be on BN right away
+ assertTrue("file3 does not exist on BackupNode",
+ op != StartupOption.BACKUP || bnFS.exists(file3));
+
} catch(IOException e) {
LOG.error("Error in TestBackupNode:", e);
assertTrue(e.getLocalizedMessage(), false);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
index bc33f175bf1..2e73ec556a2 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
@@ -37,6 +37,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.ReceivedDeletedBlockInfo;
+import org.apache.hadoop.hdfs.server.protocol.RegisterCommand;
import org.junit.After;
import org.junit.Test;
@@ -129,7 +130,7 @@ public class TestDeadDatanode {
// that asks datanode to register again
DatanodeCommand[] cmd = dnp.sendHeartbeat(reg, 0, 0, 0, 0, 0, 0, 0);
Assert.assertEquals(1, cmd.length);
- Assert.assertEquals(cmd[0].getAction(), DatanodeCommand.REGISTER
+ Assert.assertEquals(cmd[0].getAction(), RegisterCommand.REGISTER
.getAction());
}
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
index c81ffa326b7..cef0a0db879 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsLimits.java
@@ -165,7 +165,7 @@ public class TestFsLimits {
Class<?> generated = null;
try {
fs.verifyFsLimits(inodes, 1, child);
- rootInode.addChild(child, false, false);
+ rootInode.addChild(child, false);
} catch (QuotaExceededException e) {
generated = e.getClass();
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
index de55d88467c..a2dc8f45dbd 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
@@ -466,7 +466,7 @@ public class TestFsck extends TestCase {
final String bpid = cluster.getNamesystem().getBlockPoolId();
for (int i=0; i<4; i++) {
for (int j=0; j<=1; j++) {
- File storageDir = MiniDFSCluster.getStorageDir(i, j);
+ File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
File[] blocks = data_dir.listFiles();
if (blocks == null)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
index c285c667aa4..0bb43a14752 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestListCorruptFileBlocks.java
@@ -80,7 +80,7 @@ public class TestListCorruptFileBlocks {
// Now deliberately corrupt one block
String bpid = cluster.getNamesystem().getBlockPoolId();
- File storageDir = MiniDFSCluster.getStorageDir(0, 1);
+ File storageDir = cluster.getInstanceStorageDir(0, 1);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
assertTrue("data directory does not exist", data_dir.exists());
File[] blocks = data_dir.listFiles();
@@ -163,7 +163,7 @@ public class TestListCorruptFileBlocks {
+ " corrupt files. Expecting None.", badFiles.size() == 0);
// Now deliberately corrupt one block
- File storageDir = MiniDFSCluster.getStorageDir(0, 0);
+ File storageDir = cluster.getInstanceStorageDir(0, 0);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir,
cluster.getNamesystem().getBlockPoolId());
assertTrue("data directory does not exist", data_dir.exists());
@@ -284,7 +284,7 @@ public class TestListCorruptFileBlocks {
String bpid = cluster.getNamesystem().getBlockPoolId();
for (int i = 0; i < 4; i++) {
for (int j = 0; j <= 1; j++) {
- File storageDir = MiniDFSCluster.getStorageDir(i, j);
+ File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
File[] blocks = data_dir.listFiles();
if (blocks == null)
@@ -391,7 +391,7 @@ public class TestListCorruptFileBlocks {
String bpid = cluster.getNamesystem().getBlockPoolId();
// For loop through number of datadirectories per datanode (2)
for (int i = 0; i < 2; i++) {
- File storageDir = MiniDFSCluster.getStorageDir(0, i);
+ File storageDir = cluster.getInstanceStorageDir(0, i);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
File[] blocks = data_dir.listFiles();
if (blocks == null)
@@ -466,7 +466,7 @@ public class TestListCorruptFileBlocks {
final String bpid = cluster.getNamesystem().getBlockPoolId();
for (int i=0; i<4; i++) {
for (int j=0; j<=1; j++) {
- File storageDir = MiniDFSCluster.getStorageDir(i, j);
+ File storageDir = cluster.getInstanceStorageDir(i, j);
File data_dir = MiniDFSCluster.getFinalizedDir(storageDir, bpid);
LOG.info("Removing files from " + data_dir);
File[] blocks = data_dir.listFiles();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
index 60410a220b3..15cb4805a5e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeResourceChecker.java
@@ -19,9 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;
import java.util.Set;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.DF;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem.NameNodeResourceMonitor;
@@ -29,6 +32,8 @@ import org.junit.Before;
import org.junit.Test;
import org.mockito.Mockito;
+import com.google.common.collect.Lists;
+
import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertFalse;
import static org.junit.Assert.assertTrue;
@@ -41,7 +46,7 @@ public class TestNameNodeResourceChecker {
@Before
public void setUp () throws IOException {
conf = new Configuration();
- baseDir = new File(conf.get("hadoop.tmp.dir"));
+ baseDir = new File(System.getProperty("test.build.data"));
nameDir = new File(baseDir, "resource-check-name-dir");
nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
@@ -50,8 +55,6 @@ public class TestNameNodeResourceChecker {
/**
* Tests that hasAvailableDiskSpace returns true if disk usage is below
* threshold.
- *
- * @throws IOException in case of errors
*/
@Test
public void testCheckAvailability()
@@ -67,8 +70,6 @@ public class TestNameNodeResourceChecker {
/**
* Tests that hasAvailableDiskSpace returns false if disk usage is above
* threshold.
- *
- * @throws IOException in case of errors
*/
@Test
public void testCheckAvailabilityNeg() throws IOException {
@@ -83,9 +84,6 @@ public class TestNameNodeResourceChecker {
/**
* Tests that NameNode resource monitor causes the NN to enter safe mode when
* resources are low.
- *
- * @throws IOException in case of errors
- * @throws InterruptedException
*/
@Test
public void testCheckThatNameNodeResourceMonitorIsRunning()
@@ -139,14 +137,12 @@ public class TestNameNodeResourceChecker {
/**
* Tests that only a single space check is performed if two name dirs are
* supplied which are on the same volume.
- *
- * @throws IOException
*/
@Test
public void testChecking2NameDirsOnOneVolume() throws IOException {
Configuration conf = new Configuration();
- File nameDir1 = new File(conf.get("hadoop.tmp.dir", "name-dir1"));
- File nameDir2 = new File(conf.get("hadoop.tmp.dir", "name-dir2"));
+ File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
+ File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
nameDir1.mkdirs();
nameDir2.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
@@ -162,13 +158,11 @@ public class TestNameNodeResourceChecker {
/**
* Tests that only a single space check is performed if extra volumes are
* configured manually which also coincide with a volume the name dir is on.
- *
- * @throws IOException
*/
@Test
public void testCheckingExtraVolumes() throws IOException {
Configuration conf = new Configuration();
- File nameDir = new File(conf.get("hadoop.tmp.dir", "name-dir"));
+ File nameDir = new File(System.getProperty("test.build.data"), "name-dir");
nameDir.mkdirs();
conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, nameDir.getAbsolutePath());
conf.set(DFSConfigKeys.DFS_NAMENODE_CHECKED_VOLUMES_KEY, nameDir.getAbsolutePath());
@@ -179,4 +173,41 @@ public class TestNameNodeResourceChecker {
assertEquals("Should not check the same volume more than once.",
1, nb.getVolumesLowOnSpace().size());
}
+
+ /**
+ * Test that the NN is considered to be out of resources only once all
+ * configured volumes are low on resources.
+ */
+ @Test
+ public void testLowResourceVolumePolicy() throws IOException {
+ Configuration conf = new Configuration();
+ File nameDir1 = new File(System.getProperty("test.build.data"), "name-dir1");
+ File nameDir2 = new File(System.getProperty("test.build.data"), "name-dir2");
+ nameDir1.mkdirs();
+ nameDir2.mkdirs();
+
+ conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY,
+ nameDir1.getAbsolutePath() + "," + nameDir2.getAbsolutePath());
+
+ NameNodeResourceChecker nnrc = new NameNodeResourceChecker(conf);
+
+ // For the purpose of this test, we need to force the name dirs to appear to
+ // be on different volumes.
+ Map<String, DF> volumes = new HashMap<String, DF>();
+ volumes.put("volume1", new DF(nameDir1, conf));
+ volumes.put("volume2", new DF(nameDir2, conf));
+ nnrc.setVolumes(volumes);
+
+ NameNodeResourceChecker spyNnrc = Mockito.spy(nnrc);
+
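+ // stub the low-volume report: the NN remains available while only one of
+ // the two volumes is low, and goes out of resources once both are low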
+ Mockito.when(spyNnrc.getVolumesLowOnSpace()).thenReturn(
+ Lists.newArrayList("volume1"));
+
+ assertTrue(spyNnrc.hasAvailableDiskSpace());
+
+ Mockito.when(spyNnrc.getVolumesLowOnSpace()).thenReturn(
+ Lists.newArrayList("volume1", "volume2"));
+
+ assertFalse(spyNnrc.hasAvailableDiskSpace());
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
index da2bf4e22e3..88a1d0d955e 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestSafeMode.java
@@ -58,8 +58,9 @@ public class TestSafeMode {
String tipMsg = cluster.getNamesystem().getSafeModeTip();
assertTrue("Safemode tip message looks right",
- tipMsg.contains("The number of live datanodes 0 needs an " +
- "additional 1 live"));
+ tipMsg.contains("The number of live datanodes 0 needs an additional " +
+ "2 live datanodes to reach the minimum number 1. " +
+ "Safe mode will be turned off automatically."));
// Start a datanode
cluster.startDataNodes(conf, 1, true, null, null);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java
new file mode 100644
index 00000000000..0d6ff189edc
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestAuthFilter.java
@@ -0,0 +1,78 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web;
+
+import java.util.Collections;
+import java.util.Enumeration;
+import java.util.HashMap;
+import java.util.Map;
+import java.util.Properties;
+
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+import javax.servlet.ServletException;
+
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.security.authentication.server.PseudoAuthenticationHandler;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestAuthFilter {
+
+ private static class DummyFilterConfig implements FilterConfig {
+ final Map<String, String> map;
+
+ DummyFilterConfig(Map<String, String> map) {
+ this.map = map;
+ }
+
+ @Override
+ public String getFilterName() {
+ return "dummy";
+ }
+ @Override
+ public String getInitParameter(String arg0) {
+ return map.get(arg0);
+ }
+ @Override
+ public Enumeration getInitParameterNames() {
+ return Collections.enumeration(map.keySet());
+ }
+ @Override
+ public ServletContext getServletContext() {
+ return null;
+ }
+ }
+
+ @Test
+ public void testGetConfiguration() throws ServletException {
+ AuthFilter filter = new AuthFilter();
+ Map<String, String> m = new HashMap<String, String>();
+ m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_PRINCIPAL_KEY,
+ "xyz/thehost@REALM");
+ m.put(DFSConfigKeys.DFS_WEB_AUTHENTICATION_KERBEROS_KEYTAB_KEY,
+ "thekeytab");
+ FilterConfig config = new DummyFilterConfig(m);
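+ // getConfiguration should map the HDFS security keys onto the
+ // authentication filter's kerberos.* properties and allow anonymous access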
+ Properties p = filter.getConfiguration("random", config);
+ Assert.assertEquals("xyz/thehost@REALM",
+ p.getProperty("kerberos.principal"));
+ Assert.assertEquals("thekeytab", p.getProperty("kerberos.keytab"));
+ Assert.assertEquals("true",
+ p.getProperty(PseudoAuthenticationHandler.ANONYMOUS_ALLOWED));
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
index a4b687d5e71..7f6aa36a6ef 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
@@ -46,9 +46,9 @@ public class TestJsonUtil {
final FileStatus fstatus = toFileStatus(status, parent);
System.out.println("status = " + status);
System.out.println("fstatus = " + fstatus);
- final String json = JsonUtil.toJsonString(status);
+ final String json = JsonUtil.toJsonString(status, true);
System.out.println("json = " + json.replace(",", ",\n "));
- final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json));
+ final HdfsFileStatus s2 = JsonUtil.toFileStatus((Map<?, ?>)JSON.parse(json), true);
final FileStatus fs2 = toFileStatus(s2, parent);
System.out.println("s2 = " + s2);
System.out.println("fs2 = " + fs2);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
index abe07fc51f1..7d990bded59 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.hdfs.web;
import java.io.BufferedReader;
+import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
@@ -26,17 +27,25 @@ import java.net.URI;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
+import javax.servlet.http.HttpServletResponse;
+
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystemContractBaseTest;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.web.resources.GetOpParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
import org.apache.hadoop.hdfs.web.resources.PutOpParam;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Assert;
public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
private static final Configuration conf = new Configuration();
@@ -121,6 +130,8 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
}
}
+ //the following are new tests (i.e. not over-riding the super class methods)
+
public void testGetFileBlockLocations() throws IOException {
final String f = "/test/testGetFileBlockLocations";
createFile(path(f));
@@ -158,4 +169,141 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
//check if the command succeeds.
assertTrue(fs.getFileStatus(p).isDirectory());
}
+
+ public void testOpenNonExistFile() throws IOException {
+ final Path p = new Path("/test/testOpenNonExistFile");
+ //open it as a file, should get FileNotFoundException
+ try {
+ final FSDataInputStream in = fs.open(p);
+ in.read();
+ fail();
+ } catch(FileNotFoundException fnfe) {
+ WebHdfsFileSystem.LOG.info("This is expected.", fnfe);
+ }
+ }
+
+ public void testSeek() throws IOException {
+ final Path p = new Path("/test/testSeek");
+ createFile(p);
+
+ final int one_third = data.length/3;
+ final int two_third = one_third*2;
+
+ { //test seek
+ final int offset = one_third;
+ final int len = data.length - offset;
+ final byte[] buf = new byte[len];
+
+ final FSDataInputStream in = fs.open(p);
+ in.seek(offset);
+
+ //read all remaining data
+ in.readFully(buf);
+ in.close();
+
+ for (int i = 0; i < buf.length; i++) {
+ assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
+ data[i + offset], buf[i]);
+ }
+ }
+
+ { //test position read (read the data after the two_third location)
+ final int offset = two_third;
+ final int len = data.length - offset;
+ final byte[] buf = new byte[len];
+
+ final FSDataInputStream in = fs.open(p);
+ in.readFully(offset, buf);
+ in.close();
+
+ for (int i = 0; i < buf.length; i++) {
+ assertEquals("Position " + i + ", offset=" + offset + ", length=" + len,
+ data[i + offset], buf[i]);
+ }
+ }
+ }
+
+
+ public void testRootDir() throws IOException {
+ final Path root = new Path("/");
+
+ final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+ final URL url = webhdfs.toUrl(GetOpParam.Op.NULL, root);
+ WebHdfsFileSystem.LOG.info("null url=" + url);
+ Assert.assertTrue(url.toString().contains("v1"));
+
+ //test root permission
+ final FileStatus status = fs.getFileStatus(root);
+ assertTrue(status != null);
+ assertEquals(0777, status.getPermission().toShort());
+
+ //delete root - disabled due to a sticky bit bug
+ //assertFalse(fs.delete(root, true));
+
+ //create file using root path
+ try {
+ final FSDataOutputStream out = fs.create(root);
+ out.write(1);
+ out.close();
+ fail();
+ } catch(IOException e) {
+ WebHdfsFileSystem.LOG.info("This is expected.", e);
+ }
+
+ //open file using root path
+ try {
+ final FSDataInputStream in = fs.open(root);
+ in.read();
+ fail();
+ } catch(IOException e) {
+ WebHdfsFileSystem.LOG.info("This is expected.", e);
+ }
+ }
+
+ public void testResponseCode() throws IOException {
+ final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)fs;
+ final Path dir = new Path("/test/testUrl");
+ assertTrue(webhdfs.mkdirs(dir));
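+ // issue a few operations directly over HTTP and verify the response codes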
+
+ {//test set owner with empty parameters
+ final URL url = webhdfs.toUrl(PutOpParam.Op.SETOWNER, dir);
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.connect();
+ assertEquals(HttpServletResponse.SC_BAD_REQUEST, conn.getResponseCode());
+ conn.disconnect();
+ }
+
+ {//test set replication on a directory
+ final HttpOpParam.Op op = PutOpParam.Op.SETREPLICATION;
+ final URL url = webhdfs.toUrl(op, dir);
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod(op.getType().toString());
+ conn.connect();
+ assertEquals(HttpServletResponse.SC_FORBIDDEN, conn.getResponseCode());
+
+ assertFalse(webhdfs.setReplication(dir, (short)1));
+ conn.disconnect();
+ }
+
+ {//test get file status for a non-exist file.
+ final Path p = new Path(dir, "non-exist");
+ final URL url = webhdfs.toUrl(GetOpParam.Op.GETFILESTATUS, p);
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.connect();
+ assertEquals(HttpServletResponse.SC_NOT_FOUND, conn.getResponseCode());
+ conn.disconnect();
+ }
+
+ {//test set permission with empty parameters
+ final HttpOpParam.Op op = PutOpParam.Op.SETPERMISSION;
+ final URL url = webhdfs.toUrl(op, dir);
+ final HttpURLConnection conn = (HttpURLConnection) url.openConnection();
+ conn.setRequestMethod(op.getType().toString());
+ conn.connect();
+ assertEquals(HttpServletResponse.SC_OK, conn.getResponseCode());
+ assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
+ conn.disconnect();
+ }
+ }
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
new file mode 100644
index 00000000000..7cae2d6454a
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsUrl.java
@@ -0,0 +1,93 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.web;
+
+import java.io.IOException;
+import java.net.URI;
+import java.net.URL;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
+import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.hdfs.web.resources.DelegationParam;
+import org.apache.hadoop.hdfs.web.resources.TokenArgumentParam;
+import org.apache.hadoop.hdfs.web.resources.HttpOpParam;
+import org.apache.hadoop.hdfs.web.resources.PutOpParam;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
+import org.junit.Assert;
+import org.junit.Test;
+import static org.mockito.Mockito.mock;
+
+public class TestWebHdfsUrl {
+
+ @Test
+ public void testDelegationTokenInUrl() throws IOException {
+ Configuration conf = new Configuration();
+ final String uri = WebHdfsFileSystem.SCHEME + "://" + "127.0.0.1:9071";
+ // Turn on security
+ conf.set(CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+ UserGroupInformation.setConfiguration(conf);
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+ DelegationTokenIdentifier dtId = new DelegationTokenIdentifier(new Text(
+ ugi.getUserName()), null, null);
+ FSNamesystem namesystem = mock(FSNamesystem.class);
+ DelegationTokenSecretManager dtSecretManager = new DelegationTokenSecretManager(
+ 86400000, 86400000, 86400000, 86400000, namesystem);
+ dtSecretManager.startThreads();
+ Token<DelegationTokenIdentifier> token = new Token<DelegationTokenIdentifier>(
+ dtId, dtSecretManager);
+ token.setService(new Text("127.0.0.1:9071"));
+ token.setKind(WebHdfsFileSystem.TOKEN_KIND);
+ ugi.addToken(token);
+ final WebHdfsFileSystem webhdfs = (WebHdfsFileSystem) FileSystem.get(
+ URI.create(uri), conf);
+ String tokenString = token.encodeToUrlString();
+ Path fsPath = new Path("/");
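+ // the generated URLs must carry the op, user.name and token string as query parameters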
+ URL renewTokenUrl = webhdfs.toUrl(PutOpParam.Op.RENEWDELEGATIONTOKEN,
+ fsPath, new TokenArgumentParam(tokenString));
+ URL cancelTokenUrl = webhdfs.toUrl(PutOpParam.Op.CANCELDELEGATIONTOKEN,
+ fsPath, new TokenArgumentParam(tokenString));
+ Assert.assertEquals(
+ generateUrlQueryPrefix(PutOpParam.Op.RENEWDELEGATIONTOKEN,
+ ugi.getUserName())
+ + "&token=" + tokenString, renewTokenUrl.getQuery());
+ Token<DelegationTokenIdentifier> delegationToken = new Token<DelegationTokenIdentifier>(
+ token);
+ delegationToken.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
+ Assert.assertEquals(
+ generateUrlQueryPrefix(PutOpParam.Op.CANCELDELEGATIONTOKEN,
+ ugi.getUserName())
+ + "&token="
+ + tokenString
+ + "&"
+ + DelegationParam.NAME
+ + "="
+ + delegationToken.encodeToUrlString(), cancelTokenUrl.getQuery());
+ }
+
+ private String generateUrlQueryPrefix(HttpOpParam.Op op, String username) {
+ return "op=" + op.toString() + "&user.name=" + username;
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
new file mode 100644
index 00000000000..9834cb74a45
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/resources/TestParam.java
@@ -0,0 +1,227 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.web.resources;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestParam {
+ public static final Log LOG = LogFactory.getLog(TestParam.class);
+
+ final Configuration conf = new Configuration();
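+ // every *Param type validates its value at construction time; invalid
+ // values must be rejected with IllegalArgumentException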
+
+ @Test
+ public void testAccessTimeParam() {
+ final AccessTimeParam p = new AccessTimeParam(AccessTimeParam.DEFAULT);
+ Assert.assertEquals(-1L, p.getValue().longValue());
+
+ new AccessTimeParam(-1L);
+
+ try {
+ new AccessTimeParam(-2L);
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testBlockSizeParam() {
+ final BlockSizeParam p = new BlockSizeParam(BlockSizeParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+ Assert.assertEquals(
+ conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
+ DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT),
+ p.getValue(conf));
+
+ new BlockSizeParam(1L);
+
+ try {
+ new BlockSizeParam(0L);
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testBufferSizeParam() {
+ final BufferSizeParam p = new BufferSizeParam(BufferSizeParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+ Assert.assertEquals(
+ conf.getInt(CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_KEY,
+ CommonConfigurationKeysPublic.IO_FILE_BUFFER_SIZE_DEFAULT),
+ p.getValue(conf));
+
+ new BufferSizeParam(1);
+
+ try {
+ new BufferSizeParam(0);
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testDelegationParam() {
+ final DelegationParam p = new DelegationParam(DelegationParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+ }
+
+ @Test
+ public void testDestinationParam() {
+ final DestinationParam p = new DestinationParam(DestinationParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+
+ new DestinationParam("/abc");
+
+ try {
+ new DestinationParam("abc");
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testGroupParam() {
+ final GroupParam p = new GroupParam(GroupParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+ }
+
+ @Test
+ public void testModificationTimeParam() {
+ final ModificationTimeParam p = new ModificationTimeParam(ModificationTimeParam.DEFAULT);
+ Assert.assertEquals(-1L, p.getValue().longValue());
+
+ new ModificationTimeParam(-1L);
+
+ try {
+ new ModificationTimeParam(-2L);
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testOverwriteParam() {
+ final OverwriteParam p = new OverwriteParam(OverwriteParam.DEFAULT);
+ Assert.assertEquals(false, p.getValue());
+
+ new OverwriteParam("trUe");
+
+ try {
+ new OverwriteParam("abc");
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testOwnerParam() {
+ final OwnerParam p = new OwnerParam(OwnerParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+ }
+
+ @Test
+ public void testPermissionParam() {
+ final PermissionParam p = new PermissionParam(PermissionParam.DEFAULT);
+ Assert.assertEquals(new FsPermission((short)0755), p.getFsPermission());
+
+ new PermissionParam("0");
+
+ try {
+ new PermissionParam("-1");
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+
+ new PermissionParam("1777");
+
+ try {
+ new PermissionParam("2000");
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+
+ try {
+ new PermissionParam("8");
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+
+ try {
+ new PermissionParam("abc");
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testRecursiveParam() {
+ final RecursiveParam p = new RecursiveParam(RecursiveParam.DEFAULT);
+ Assert.assertEquals(false, p.getValue());
+
+ new RecursiveParam("falSe");
+
+ try {
+ new RecursiveParam("abc");
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+
+ @Test
+ public void testRenewerParam() {
+ final RenewerParam p = new RenewerParam(RenewerParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+ }
+
+ @Test
+ public void testReplicationParam() {
+ final ReplicationParam p = new ReplicationParam(ReplicationParam.DEFAULT);
+ Assert.assertEquals(null, p.getValue());
+ Assert.assertEquals(
+ (short)conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+ DFSConfigKeys.DFS_REPLICATION_DEFAULT),
+ p.getValue(conf));
+
+ new ReplicationParam((short)1);
+
+ try {
+ new ReplicationParam((short)0);
+ Assert.fail();
+ } catch(IllegalArgumentException e) {
+ LOG.info("EXPECTED: " + e);
+ }
+ }
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
index 03657ab3473..e65b9009d5c 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/security/TestPermission.java
@@ -111,10 +111,18 @@ public class TestPermission extends TestCase {
FsPermission dirPerm = new FsPermission((short)0777);
fs.mkdirs(new Path("/a1/a2/a3"), dirPerm);
- checkPermission(fs, "/a1", inheritPerm);
- checkPermission(fs, "/a1/a2", inheritPerm);
+ checkPermission(fs, "/a1", dirPerm);
+ checkPermission(fs, "/a1/a2", dirPerm);
checkPermission(fs, "/a1/a2/a3", dirPerm);
+ dirPerm = new FsPermission((short)0123);
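+ // parent directories created implicitly by mkdirs get u+wx (0300) added to
+ // the requested permission; only the leaf keeps the exact permission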
+ FsPermission permission = FsPermission.createImmutable(
+ (short)(dirPerm.toShort() | 0300));
+ fs.mkdirs(new Path("/aa/1/aa/2/aa/3"), dirPerm);
+ checkPermission(fs, "/aa/1", permission);
+ checkPermission(fs, "/aa/1/aa/2", permission);
+ checkPermission(fs, "/aa/1/aa/2/aa/3", dirPerm);
+
FsPermission filePerm = new FsPermission((short)0444);
FSDataOutputStream out = fs.create(new Path("/b1/b2/b3.txt"), filePerm,
true, conf.getInt("io.file.buffer.size", 4096),
@@ -126,7 +134,7 @@ public class TestPermission extends TestCase {
checkPermission(fs, "/b1/b2/b3.txt", filePerm);
conf.set(FsPermission.UMASK_LABEL, "022");
- FsPermission permission =
+ permission =
FsPermission.createImmutable((short)0666);
FileSystem.mkdirs(fs, new Path("/c1"), new FsPermission(permission));
FileSystem.create(fs, new Path("/c1/c2.txt"),
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
index a0dfcb4b49b..709a5012bfb 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/HdfsTestDriver.java
@@ -43,14 +43,17 @@ public class HdfsTestDriver {
}
public void run(String argv[]) {
+ int exitCode = -1;
try {
- pgd.driver(argv);
+ exitCode = pgd.driver(argv);
} catch(Throwable e) {
e.printStackTrace();
}
+
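+ // propagate the driver's exit code so test failures are visible to callers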
+ System.exit(exitCode);
}
public static void main(String argv[]){
new HdfsTestDriver().run(argv);
}
-}
\ No newline at end of file
+}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
index f708c3e2930..3832aa07357 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/tools/TestDelegationTokenFetcher.java
@@ -20,7 +20,6 @@ import static org.junit.Assert.assertEquals;
import static org.junit.Assert.assertTrue;
import static org.mockito.Matchers.eq;
import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.when;
import java.io.IOException;
@@ -37,7 +36,9 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifie
import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
import org.junit.Before;
import org.junit.Test;
@@ -46,6 +47,7 @@ public class TestDelegationTokenFetcher {
private Configuration conf;
private URI uri;
private static final String SERVICE_VALUE = "localhost:2005";
+ private static final Text KIND = new Text("TESTING-TOKEN-KIND");
private static String tokenFile = "file.dta";
@Before
@@ -56,25 +58,59 @@ public class TestDelegationTokenFetcher {
FileSystemTestHelper.addFileSystemForTesting(uri, conf, dfs);
}
+ public static class FakeRenewer extends TokenRenewer {
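+ // registered via META-INF/services; records the last token renewed or
+ // canceled so the test can verify the fetcher used the TokenRenewer path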
+ static Token<?> lastRenewed = null;
+ static Token<?> lastCanceled = null;
+
+ @Override
+ public boolean handleKind(Text kind) {
+ return KIND.equals(kind);
+ }
+
+ @Override
+ public boolean isManaged(Token<?> token) throws IOException {
+ return true;
+ }
+
+ @Override
+ public long renew(Token<?> token, Configuration conf) {
+ lastRenewed = token;
+ return 0;
+ }
+
+ @Override
+ public void cancel(Token<?> token, Configuration conf) {
+ lastCanceled = token;
+ }
+
+ public static void reset() {
+ lastRenewed = null;
+ lastCanceled = null;
+ }
+ }
+
/**
* Verify that when the DelegationTokenFetcher runs, it talks to the Namenode,
* pulls out the correct user's token and successfully serializes it to disk.
*/
+ @SuppressWarnings("deprecation")
@Test
public void expectedTokenIsRetrievedFromDFS() throws Exception {
final byte[] ident = new DelegationTokenIdentifier(new Text("owner"),
new Text("renewer"), new Text("realuser")).getBytes();
final byte[] pw = new byte[] { 42 };
- final Text kind = new Text("MY-KIND");
final Text service = new Text(uri.toString());
+ final String user =
+ UserGroupInformation.getCurrentUser().getShortUserName();
// Create a token for the fetcher to fetch, wire NN to return it when asked
// for this particular user.
- Token<DelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>(
- ident, pw, kind, service);
- when(dfs.getDelegationToken((String) null)).thenReturn(t);
+ Token<DelegationTokenIdentifier> t =
+ new Token<DelegationTokenIdentifier>(ident, pw, KIND, service);
+ when(dfs.getDelegationToken(eq((String) null))).thenReturn(t);
when(dfs.renewDelegationToken(eq(t))).thenReturn(1000L);
when(dfs.getUri()).thenReturn(uri);
+ FakeRenewer.reset();
FileSystem fileSys = FileSystem.getLocal(conf);
try {
@@ -88,14 +124,13 @@ public class TestDelegationTokenFetcher {
assertEquals(t, itr.next());
assertTrue(!itr.hasNext());
- DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
- "--print", tokenFile });
- DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
- "--renew", tokenFile });
- DelegationTokenFetcher.main(new String[] { "-fs", uri.toString(),
- "--cancel", tokenFile });
- verify(dfs).renewDelegationToken(eq(t));
- verify(dfs).cancelDelegationToken(eq(t));
+ DelegationTokenFetcher.main(new String[] { "--print", tokenFile });
+ DelegationTokenFetcher.main(new String[] { "--renew", tokenFile });
+ assertEquals(t, FakeRenewer.lastRenewed);
+ FakeRenewer.reset();
+
+ DelegationTokenFetcher.main(new String[] { "--cancel", tokenFile });
+ assertEquals(t, FakeRenewer.lastCanceled);
} finally {
fileSys.delete(new Path(tokenFile), true);
}
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
new file mode 100644
index 00000000000..568cc80764c
--- /dev/null
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/resources/META-INF/services/org.apache.hadoop.security.token.TokenRenewer
@@ -0,0 +1 @@
+org.apache.hadoop.tools.TestDelegationTokenFetcher$FakeRenewer
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
index ea15f381cba..08e5d569a96 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestINodeFile.java
@@ -139,11 +139,11 @@ public class TestINodeFile {
assertEquals("f", inf.getFullPathName());
assertEquals("", inf.getLocalParentDir());
- dir.addChild(inf, false, false);
+ dir.addChild(inf, false);
assertEquals("d"+Path.SEPARATOR+"f", inf.getFullPathName());
assertEquals("d", inf.getLocalParentDir());
- root.addChild(dir, false, false);
+ root.addChild(dir, false);
assertEquals(Path.SEPARATOR+"d"+Path.SEPARATOR+"f", inf.getFullPathName());
assertEquals(Path.SEPARATOR+"d", dir.getFullPathName());
diff --git a/hadoop-mapreduce-project/CHANGES.txt b/hadoop-mapreduce-project/CHANGES.txt
index bb7242f29c7..5e7b03524ab 100644
--- a/hadoop-mapreduce-project/CHANGES.txt
+++ b/hadoop-mapreduce-project/CHANGES.txt
@@ -10,6 +10,8 @@ Trunk (unreleased changes)
(Plamen Jeliazkov via shv)
IMPROVEMENTS
+ MAPREDUCE-3008. Improvements to cumulative CPU emulation for short running
+ tasks in Gridmix. (amarrk)
MAPREDUCE-2887 due to HADOOP-7524 Change RPC to allow multiple protocols
including multiple versions of the same protocol (sanjay Radia)
@@ -20,6 +22,12 @@ Trunk (unreleased changes)
MAPREDUCE-2836. Provide option to fail jobs when submitted to non-existent
fair scheduler pools. (Ahmed Radwan via todd)
+ MAPREDUCE-3171. normalize nodemanager native code compilation with common/hdfs
+ native. (tucu)
+
+ MAPREDUCE-3149. Add a test to verify that TokenCache handles file system
+ uri with no authority. (John George via jitendra)
+
BUG FIXES
MAPREDUCE-2950. [Gridmix] TestUserResolve fails in trunk.
@@ -32,9 +40,17 @@ Trunk (unreleased changes)
findBugs, correct links to findBugs artifacts and no links to the
artifacts when there are no warnings. (Tom White via vinodkv).
- MAPREDUCE-3081. Fix vaidya startup script. (gkesavan via suhas).
+ MAPREDUCE-3183. hadoop-assemblies/src/main/resources/assemblies/hadoop-mapreduce-dist.xml
+ missing license header. (Hitesh Shah via tucu).
-Release 0.23.0 - Unreleased
+ MAPREDUCE-3003. Publish MR JARs to Maven snapshot repository. (tucu)
+
+ MAPREDUCE-3204. mvn site:site fails on MapReduce. (tucu)
+
+ MAPREDUCE-3014. Rename and invert logic of '-cbuild' profile to 'native' and off
+ by default. (tucu)
+
+Release 0.23.0 - 2011-11-01
INCOMPATIBLE CHANGES
@@ -75,12 +91,26 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2037. Capture intermediate progress, CPU and memory usage for
tasks. (Dick King via acmurthy)
+ MAPREDUCE-279. MapReduce 2.0. Merging MR-279 branch into trunk. Contributed by
+ Arun C Murthy, Christopher Douglas, Devaraj Das, Greg Roelofs, Jeffrey
+ Naisbitt, Josh Wills, Jonathan Eagles, Krishna Ramachandran, Luke Lu, Mahadev
+ Konar, Robert Evans, Sharad Agarwal, Siddharth Seth, Thomas Graves, and Vinod
+ Kumar Vavilapalli.
+
MAPREDUCE-2930. Added the ability to be able to generate graphs from the
state-machine definitions. (Binglin Chang via vinodkv)
MAPREDUCE-2719. Add a simple, DistributedShell, application to illustrate
alternate frameworks on YARN. (Hitesh Shah via acmurthy)
+ MAPREDUCE-3104. Implemented Application-acls. (vinodkv)
+
+ MAPREDUCE-2708. Designed and implemented MR Application Master recovery to
+ make MR AMs resume their progress after restart. (Sharad Agarwal via vinodkv)
+
+ MAPREDUCE-2858. Added a WebApp Proxy for applications. (Robert Evans via
+ acmurthy)
+
IMPROVEMENTS
MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via
@@ -318,6 +348,9 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2726. Added job-file to the AM and JobHistoryServer web
interfaces. (Jeffrey Naisbitt via vinodkv)
+ MAPREDUCE-2880. Improve classpath-construction for mapreduce AM and
+ containers. (Arun C Murthy via vinodkv)
+
MAPREDUCE-3055. Simplified ApplicationAttemptId passing to
ApplicationMaster via environment variable. (vinodkv)
@@ -338,9 +371,6 @@ Release 0.23.0 - Unreleased
MAPREDUCE-3134. Added documentation the CapacityScheduler. (acmurthy)
- MAPREDUCE-3138. Add a utility to help applications bridge changes in
- Context Objects APIs due to MAPREDUCE-954. (omalley via acmurthy)
-
MAPREDUCE-3013. Removed YarnConfiguration.YARN_SECURITY_INFO and its usage
as it doesn't affect security any more. (vinodkv)
@@ -351,9 +381,68 @@ Release 0.23.0 - Unreleased
the outputs of tasks from a crashed job so as to support MR Application
Master recovery. (Sharad Agarwal and Arun C Murthy via vinodkv)
- MAPREDUCE-2738. Added the missing cluster level statisticss on the RM web
+ MAPREDUCE-2738. Added the missing cluster level statistics on the RM web
UI. (Robert Joseph Evans via vinodkv)
+ MAPREDUCE-2988. Reenabled TestLinuxContainerExecutor reflecting the
+ current NodeManager code. (Robert Joseph Evans via vinodkv)
+
+ MAPREDUCE-3161. Improved some javadocs and fixed some typos in
+ YARN. (Todd Lipcon via vinodkv)
+
+ MAPREDUCE-3148. Ported MAPREDUCE-2702 to old mapred api for aiding task
+ recovery. (acmurthy)
+
+ MAPREDUCE-3133. Running a set of methods in a Single Test Class.
+ (Jonathan Eagles via mahadev)
+
+ MAPREDUCE-3059. QueueMetrics do not have metrics for aggregate
+ containers-allocated and aggregate containers-released.
+ (Devaraj K via mahadev)
+
+ MAPREDUCE-3187. Add names for various unnamed threads in MR2.
+ (Todd Lipcon and Siddharth Seth via mahadev)
+
+ MAPREDUCE-3136. Added documentation for setting up Hadoop clusters in both
+ non-secure and secure mode for both HDFS & YARN. (acmurthy)
+
+ MAPREDUCE-3068. Added a whitelist of environment variables for containers
+ from the NodeManager and set MALLOC_ARENA_MAX for all daemons and
+ containers. (Chris Riccomini via acmurthy)
+
+ MAPREDUCE-3144. Augmented JobHistory with the information needed for
+ serving aggregated logs. (Siddharth Seth via vinodkv)
+
+ MAPREDUCE-3163. JobClient spews errors when killing MR2 job.
+ (mahadev)
+
+ MAPREDUCE-3239. Use new createSocketAddr API in MRv2 to give better
+ error messages on misconfig (Todd Lipcon via mahadev)
+
+ MAPREDUCE-2747. Cleaned up LinuxContainerExecutor binary sources and changed
+ the configuration to use yarn names. (Robert Joseph Evans via vinodkv)
+
+ MAPREDUCE-3205. Fix memory specifications to be physical rather than
+ virtual, allowing for a ratio between the two to be configurable. (todd
+ via acmurthy)
+
+ MAPREDUCE-2986. Fixed MiniYARNCluster to support multiple NodeManagers.
+ (Anupam Seth via vinodkv)
+
+ MAPREDUCE-2736. Remove unused contrib components dependent on MR1. (eli)
+
+ MAPREDUCE-2989. Modified JobHistory to link to task and AM logs from the
+ JobHistoryServer. (Siddharth Seth via vinodkv)
+
+ MAPREDUCE-3146. Added a MR specific command line to dump logs for a
+ given TaskAttemptID. (Siddharth Seth via vinodkv)
+
+ MAPREDUCE-3275. Added documentation for AM WebApp Proxy. (Robert Evans via
+ acmurthy)
+
+ MAPREDUCE-3322. Added a better index.html and a brief overview of YARN
+ architecture. (acmurthy)
+
OPTIMIZATIONS
MAPREDUCE-2026. Make JobTracker.getJobCounters() and
@@ -365,8 +454,8 @@ Release 0.23.0 - Unreleased
MAPREDUCE-901. Efficient framework counters. (llu via acmurthy)
- MAPREDUCE-2880. Improve classpath-construction for mapreduce AM and
- containers. (Arun C Murthy via vinodkv)
+ MAPREDUCE-2629. Workaround a JVM class loading quirk which prevents
+ JIT compilation of inner classes methods in ReduceContextImpl.
BUG FIXES
@@ -1529,6 +1618,300 @@ Release 0.23.0 - Unreleased
MAPREDUCE-2913. Fixed TestMRJobs.testFailingMapper to assert the correct
TaskCompletionEventStatus. (Jonathan Eagles via vinodkv)
+ MAPREDUCE-2794. [MR-279] Incorrect metrics value for AvailableGB per
+ queue per user. (John George via mahadev)
+
+ MAPREDUCE-2783. Fixing RM web-UI to show no tracking-URL when AM
+ crashes. (Eric Payne via vinodkv)
+
+ MAPREDUCE-3141. Fix the broken MRAppMaster to work over YARN in security
+ mode.(vinodkv)
+
+ MAPREDUCE-2751. Modified NodeManager to stop leaving around local files
+ after application finishes. (Siddharth Seth via vinodkv)
+
+ MAPREDUCE-3033. Ensure Master interface pays attention to classic v/s yarn
+ frameworks. (Hitesh Shah via acmurthy)
+
+ MAPREDUCE-2802. Ensure JobHistory filenames have jobId. (Jonathan Eagles
+ via acmurthy)
+
+ MAPREDUCE-2876. Use a different config for ContainerAllocationExpirer.
+ (Anupam Seth via acmurthy)
+
+ MAPREDUCE-3153. Fix TestFileOutputCommitter which was broken by
+ MAPREDUCE-2702. (mahadev via acmurthy)
+
+ MAPREDUCE-3123. Fix NM to quote symlink names to escape special
+ characters. (Hitesh Shah via acmurthy)
+
+ MAPREDUCE-3154. Fix JobSubmitter to check for output specs before copying
+ job submission files to fail fast. (Abhijit Suresh Shingate via acmurthy)
+
+ MAPREDUCE-3158. Fix test failures in MRv1 due to default framework being
+ set to yarn. (Hitesh Shah via acmurthy)
+
+ MAPREDUCE-3167. container-executor is not being packaged with the assembly
+ target. (mahadev)
+
+ MAPREDUCE-3020. Fixed TaskAttemptImpl to log the correct node-address for
+ a finished Reduce task. (Chackaravarthy via vinodkv)
+
+ MAPREDUCE-2668. Fixed AuxServices to send a signal on application-finish
+ to all the services. (Thomas Graves via vinodkv)
+
+ MAPREDUCE-3126. Fixed a corner case in CapacityScheduler where headroom
+ wasn't updated on changes to cluster size. (acmurthy)
+
+ MAPREDUCE-3140. Fixed the invalid JobHistory URL for failed
+ applications. (Subroto Sanyal via vinodkv)
+
+ MAPREDUCE-3125. Modified TaskImpl to consider only non-failed, non-killed
+ task-attempts for obtaining task's progress. (Hitesh Shah via vinodkv)
+
+ MAPREDUCE-2666. Retrieve shuffle port number from JobHistory on MR AM
+ restart. (Jonathan Eagles via acmurthy)
+
+ MAPREDUCE-2789. Complete schedulingInfo on CLI. (Eric Payne via acmurthy)
+
+ MAPREDUCE-3170. Fixed job output commit for deep hierarchies. (Hitesh Shah
+ via acmurthy)
+
+ MAPREDUCE-3124. Fixed location of native libs i.e. libhadoop.so for
+ containers. (John George via acmurthy)
+
+ MAPREDUCE-3057. Job History Server runs out of memory with 1200 jobs and
+ heap size set to 10 GB. (Eric Payne via mahadev)
+
+ MAPREDUCE-2840. mr279 TestUberAM.testSleepJob test fails. (jonathan eagles
+ via mahadev)
+
+ MAPREDUCE-3190. Ensure bin/yarn fails early with a clear error message
+ when HADOOP_COMMON_HOME or HADOOP_HDFS_HOME are not set. (todd & acmurthy
+ via acmurthy)
+
+ MAPREDUCE-3189. Add link decoration back to MR2's CSS. (Todd Lipcon via
+ mahadev)
+
+ MAPREDUCE-3127. Changed default value of yarn.resourcemanager.acl.enable
+ to true and added some more documentation. (acmurthy)
+
+ MAPREDUCE-3032. Fixed TaskAttemptImpl so that JobHistory can have error
+ information about failed tasks. (Devaraj K via vinodkv)
+
+ MAPREDUCE-3196. TestLinuxContainerExecutorWithMocks fails on Mac OSX.
+ (Arun Murthy via mahadev)
+
+ MAPREDUCE-3197. TestMRClientService failing on building clean checkout of
+ branch 0.23 (mahadev)
+
+ MAPREDUCE-2762. Cleanup MR staging directory on completion. (mahadev via
+ acmurthy)
+
+ MAPREDUCE-3165. Ensure logging options are set correctly for MR AM and
+ tasks. (todd via acmurthy)
+
+ MAPREDUCE-3203. Fix some javac warnings in MRAppMaster. (mahadev)
+
+ MAPREDUCE-3199. Fixed pom files to include correct log4j configuration for
+ tests. (vinodkv)
+
+ MAPREDUCE-3162. Separated application-init and container-init event types
+ in NodeManager's Application state machine. (Todd Lipcon via vinodkv)
+
+ MAPREDUCE-3176. Fixed ant mapreduce tests that are timing out because
+ of wrong framework name. (Hitesh Shah via vinodkv)
+
+ MAPREDUCE-3181. Fixed MapReduce runtime to load yarn-default.xml and
+ yarn-site.xml. (acmurthy)
+
+ MAPREDUCE-2788. Normalize resource requests in FifoScheduler
+ appropriately. (Ahmed Radwan via acmurthy)
+
+ MAPREDUCE-2693. Fix NPE in job-blacklisting. (Hitesh Shah via acmurthy)
+
+ MAPREDUCE-3208. Fix NPE in task/container log appenders. (liangzhwa via
+ acmurthy)
+
+ MAPREDUCE-3212. Fix usage/help message for bin/yarn. (Bhallamudi Venkata
+ Siva Kamesh via acmurthy)
+
+ MAPREDUCE-3179. Ensure failed tests exit with right error code. (Jonathan
+ Eagles via acmurthy)
+
+ MAPREDUCE-3188. Ensure correct shutdown in services. (todd via acmurthy)
+
+ MAPREDUCE-3226. Fix shutdown of fetcher threads. (vinodkv via acmurthy)
+
+ MAPREDUCE-3070. Fix NodeManager to use ephemeral ports by default.
+ (Devaraj K via acmurthy)
+
+ MAPREDUCE-3242. Trunk compilation broken with bad interaction from
+ MAPREDUCE-3070 and MAPREDUCE-3239. (mahadev)
+
+ MAPREDUCE-3058. Fixed MR YarnChild to report failure when task throws an
+ error and thus prevent a hanging task and job. (vinodkv)
+
+ MAPREDUCE-3087. Fixed the mapreduce classpath to correctly include the
+ generated-classpath file needed for tests. (Ravi Prakash via vinodkv)
+
+ MAPREDUCE-3233. Fixed a bug in MR Job so as to be able to restart the
+ application on AM crash. (Mahadev Konar via vinodkv)
+
+ MAPREDUCE-3028. Added job-end notification support. (Ravi Prakash via
+ acmurthy)
+
+ MAPREDUCE-3249. Ensure shuffle-port is correctly used during MR AM recovery.
+ (vinodkv via acmurthy)
+
+ MAPREDUCE-3252. Fix map tasks to not rewrite data an extra time when
+ map output fits in spill buffer. (todd)
+
+ MAPREDUCE-3159. Ensure DefaultContainerExecutor doesn't delete application
+ directories during app-init. (todd via acmurthy)
+
+ MAPREDUCE-3248. Fixed log4j properties. (vinodkv via acmurthy)
+
+ MAPREDUCE-2746. Yarn servers can't communicate with each other with
+ hadoop.security.authorization set to true. (acmurthy via mahadev)
+
+ MAPREDUCE-2821. Added missing fields (resourcePerMap & resourcePerReduce)
+ to JobSummary logs. (mahadev via acmurthy)
+
+ MAPREDUCE-3253. Fixed ContextFactory to clone JobContext correctly.
+ (acmurthy)
+
+ MAPREDUCE-3263. Fixed the MAPREDUCE-3028 commit which broke MR1. (Hitesh
+ Shah via acmurthy)
+
+ MAPREDUCE-3269. Fixed log4j properties to correctly set logging options
+ for JobHistoryServer vis-a-vis JobSummary logs. (mahadev via acmurthy)
+
+ MAPREDUCE-2977. Fix ResourceManager to renew HDFS delegation tokens for
+ applications. (acmurthy)
+
+ MAPREDUCE-3250. When AM restarts, client keeps reconnecting to the new AM
+ and prints a lot of logs. (vinodkv via mahadev)
+
+ MAPREDUCE-3254. Fixed streaming to set the job.jar by using the right
+ JobConf ctor. (acmurthy)
+
+ MAPREDUCE-3264. mapreduce.job.user.name needs to be set automatically.
+ (acmurthy via mahadev)
+
+ MAPREDUCE-3175. Add authorization to admin web-pages such as /stacks, /jmx
+ etc. (Jonathan Eagles via acmurthy)
+
+ MAPREDUCE-3257. Added authorization checks for the protocol between
+ ResourceManager and ApplicationMaster. (vinodkv via acmurthy)
+
+ MAPREDUCE-3259. Added java.library.path of NodeManager to
+ ContainerLocalizer in LinuxContainerExecutor. (Kihwal Lee via acmurthy)
+
+ MAPREDUCE-3279. Fixed TestJobHistoryParsing which assumed user name to be
+ mapred all the time. (Siddharth Seth via acmurthy)
+
+ MAPREDUCE-3240. Fixed NodeManager to be able to forcefully cleanup its
+ containers (process-trees) irrespective of whether the container succeeded
+ or was killed. (Hitesh Shah via vinodkv)
+
+ MAPREDUCE-3281. Fixed a bug in TestLinuxContainerExecutorWithMocks. (vinodkv)
+
+ MAPREDUCE-3228. Fixed MR AM to timeout RPCs to bad NodeManagers. (vinodkv
+ via acmurthy)
+
+ MAPREDUCE-3284. Moved JobQueueClient to hadoop-mapreduce-client-core.
+ (acmurthy)
+
+ MAPREDUCE-3282. bin/mapred job -list throws exception. (acmurthy via
+ mahadev)
+
+ MAPREDUCE-3186. User jobs hang if the ResourceManager process goes down
+ and comes up while a job is executing.
+ (Eric Payne via mahadev)
+
+ MAPREDUCE-3209. Jenkins reports 160 FindBugs warnings (mahadev)
+
+ MAPREDUCE-3258. Fixed AM & JobHistory web-ui to display counters properly.
+ (Siddharth Seth via acmurthy)
+
+ MAPREDUCE-3290. Fixed a NPE in ClientRMService. (acmurthy)
+
+ MAPREDUCE-3185. RM Web UI does not sort the columns in some cases.
+ (Jonathan Eagles via mahadev)
+
+ MAPREDUCE-3292. In secure mode job submission fails with Provider
+ org.apache.hadoop.mapreduce.security.token.JobTokenIndentifier$Renewer
+ not found. (mahadev)
+
+ MAPREDUCE-3296. Fixed the remaining nine FindBugs warnings. (vinodkv)
+
+ MAPREDUCE-2775. Fixed ResourceManager and NodeManager to force a
+ decommissioned node to shutdown. (Devaraj K via vinodkv)
+
+ MAPREDUCE-3304. Fixed intermittent test failure due to a race in
+ TestRMContainerAllocator#testBlackListedNodes. (Ravi Prakash via acmurthy)
+
+ MAPREDUCE-3306. Fixed a bug in NodeManager ApplicationImpl that was causing
+ NodeManager to crash. (vinodkv)
+
+ MAPREDUCE-3256. Added authorization checks for the protocol between
+ NodeManager and ApplicationMaster. (vinodkv via acmurthy)
+
+ MAPREDUCE-3274. Fixed a race condition in MRAppMaster that was causing a
+ task-scheduling deadlock. (Robert Joseph Evans via vinodkv)
+
+ MAPREDUCE-3313. Fixed initialization of ClusterMetrics which was failing
+ TestResourceTrackerService sometimes. (Hitesh Shah via vinodkv)
+
+ MAPREDUCE-2766. Fixed NM to set secure permissions for files and directories
+ in distributed-cache. (Hitesh Shah via vinodkv)
+
+ MAPREDUCE-2696. Fixed NodeManager to cleanup logs in a thread when logs'
+ aggregation is not enabled. (Siddharth Seth via vinodkv)
+
+ MAPREDUCE-3262. Fixed Container's state-machine in NodeManager to handle
+ a couple of events in failure states correctly. (Hitesh Shah and Siddharth
+ Seth via vinodkv)
+
+ MAPREDUCE-3035. Fixed MR JobHistory to ensure rack information is present.
+ (chakravarthy via acmurthy)
+
+ MAPREDUCE-3321. Disabled a few MR tests for 0.23. (Hitesh Shah via
+ acmurthy)
+
+ MAPREDUCE-3220. Fixed TestCombineOutputCollector. (Devaraj K via acmurthy)
+
+ MAPREDUCE-3103. Implement Job ACLs for MRAppMaster.
+ (mahadev)
+
+ MAPREDUCE-3241. [Rumen] Fix Rumen to ignore the AMStartedEvent. (amarrk)
+
+ MAPREDUCE-3166. [Rumen] Make Rumen use job history api instead of relying
+ on current history file name format. (Ravi Gummadi)
+
+ MAPREDUCE-3157. [Rumen] Fix TraceBuilder to handle 0.20 history file
+ names also. (Ravi Gummadi)
+
+ MAPREDUCE-3081. Fix vaidya startup script. (gkesavan via suhas).
+
+ MAPREDUCE-2764. Fix renewal of dfs delegation tokens. (Owen via jitendra)
+
+ MAPREDUCE-3192. Fix Javadoc warning in JobClient.java and Cluster.java.
+ (jitendra)
+
+ MAPREDUCE-3237. Move LocalJobRunner to hadoop-mapreduce-client-core.
+ (tomwhite via acmurthy)
+
+ MAPREDUCE-3316. Rebooted link is not working properly.
+ (Bhallamudi Venkata Siva Kamesh via mahadev)
+
+ MAPREDUCE-3317. Rumen TraceBuilder is emitting null as hostname.
+ (Ravi Gummadi via mahadev)
+
+ MAPREDUCE-3332. contrib/raid compile breaks due to changes in hdfs/protocol/datatransfer/
+ Sender#writeBlock related to checksum handling (Hitesh Shah via mahadev)
+
Release 0.22.0 - Unreleased
INCOMPATIBLE CHANGES
@@ -1745,6 +2128,9 @@ Release 0.22.0 - Unreleased
MAPREDUCE-2505. Explain how to use ACLs in the fair scheduler.
(matei via eli)
+ MAPREDUCE-3138. Add a utility to help applications bridge changes in
+ Context Objects APIs due to MAPREDUCE-954. (omalley via acmurthy)
+
OPTIMIZATIONS
MAPREDUCE-1354. Enhancements to JobTracker for better performance and
diff --git a/hadoop-mapreduce-project/INSTALL b/hadoop-mapreduce-project/INSTALL
index 16db5b6dbee..e6de8cb92e4 100644
--- a/hadoop-mapreduce-project/INSTALL
+++ b/hadoop-mapreduce-project/INSTALL
@@ -2,49 +2,31 @@ To compile Hadoop MapReduce next (MRv2), do the following:
Step 1) Install dependencies for yarn
-See http://svn.apache.org/repos/asf/hadoop/common/trunk/hadoop-mapreduce/hadoop-yarn/README
+See http://svn.apache.org/repos/asf/hadoop/common/trunk/hadoop-mapreduce-project/hadoop-yarn/README
Make sure the protobuf library is in your library path or set: export LD_LIBRARY_PATH=/usr/local/lib
Step 2) Checkout
svn checkout http://svn.apache.org/repos/asf/hadoop/common/trunk
-Step 3) Build common
+Step 3) Build
-Go to common directory - choose your regular common build command
-Example: mvn clean install package -Pbintar -DskipTests
+Go to the common directory and run your regular build command. For example:
-Step 4) Build HDFS
-
-Go to hdfs directory
-ant veryclean mvn-install -Dresolvers=internal
-
-Step 5) Build yarn and mapreduce
-
-Go to mapreduce directory
export MAVEN_OPTS=-Xmx512m
+mvn clean package -Pdist -Dtar -DskipTests -Pnative
-mvn clean install assembly:assembly -DskipTests
+You can omit -Pnative if you don't want to build native packages.
-Copy in build.properties if appropriate - make sure eclipse.home not set
-ant veryclean tar -Dresolvers=internal
+Step 4) Untar the tarball from hadoop-dist/target/ into a clean and different
+directory, say YARN_HOME.
-You will see a tarball in
-ls target/hadoop-mapreduce-0.24.0-SNAPSHOT-all.tar.gz
-
-Step 6) Untar the tarball in a clean and different directory.
-say YARN_HOME.
-
-Make sure you aren't picking up avro-1.3.2.jar, remove:
- $HADOOP_COMMON_HOME/share/hadoop/common/lib/avro-1.3.2.jar
- $YARN_HOME/lib/avro-1.3.2.jar
-
-Step 7)
-Install hdfs/common and start hdfs
+Step 5)
+Start hdfs
To run Hadoop MapReduce next (MRv2) applications:
-Step 8) export the following variables to where you have things installed:
+Step 6) export the following variables to where you have things installed:
You probably want to export these in hadoop-env.sh and yarn-env.sh also.
export HADOOP_MAPRED_HOME=
@@ -54,7 +36,7 @@ export YARN_HOME=directory where you untarred yarn
export HADOOP_CONF_DIR=
export YARN_CONF_DIR=$HADOOP_CONF_DIR
-Step 9) Setup config: for running mapreduce applications, which now are in user land, you need to setup nodemanager with the following configuration in your yarn-site.xml before you start the nodemanager.
+Step 7) Set up the config: to run MapReduce applications, which now run in user land, you need to configure the NodeManager with the following settings in your yarn-site.xml before you start the NodeManager.
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce.shuffle</value>
</property>
@@ -65,31 +47,21 @@ Step 9) Setup config: for running mapreduce applications, which now are in user
<property>
  <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
-Step 10) Modify mapred-site.xml to use yarn framework
+Step 8) Modify mapred-site.xml to use yarn framework
<property>
  <name>mapreduce.framework.name</name>
  <value>yarn</value>
</property>
-Step 11) Create the following symlinks in $HADOOP_COMMON_HOME/share/hadoop/common/lib
+Step 9) cd $YARN_HOME
-ln -s $YARN_HOME/modules/hadoop-mapreduce-client-app-0.24.0-SNAPSHOT.jar .
-ln -s $YARN_HOME/modules/hadoop-yarn-api-0.24.0-SNAPSHOT.jar .
-ln -s $YARN_HOME/modules/hadoop-mapreduce-client-common-0.24.0-SNAPSHOT.jar .
-ln -s $YARN_HOME/modules/hadoop-yarn-common-0.24.0-SNAPSHOT.jar .
-ln -s $YARN_HOME/modules/hadoop-mapreduce-client-core-0.24.0-SNAPSHOT.jar .
-ln -s $YARN_HOME/modules/hadoop-yarn-server-common-0.24.0-SNAPSHOT.jar .
-ln -s $YARN_HOME/modules/hadoop-mapreduce-client-jobclient-0.24.0-SNAPSHOT.jar .
+Step 10) bin/yarn-daemon.sh start resourcemanager
-Step 12) cd $YARN_HOME
+Step 11) bin/yarn-daemon.sh start nodemanager
-Step 13) bin/yarn-daemon.sh start resourcemanager
+Step 12) bin/yarn-daemon.sh start historyserver
-Step 14) bin/yarn-daemon.sh start nodemanager
-
-Step 15) bin/yarn-daemon.sh start historyserver
-
-Step 16) You are all set, an example on how to run a mapreduce job is:
+Step 13) You are all set. An example of how to run a MapReduce job:
cd $HADOOP_MAPRED_HOME
ant examples -Dresolvers=internal
$HADOOP_COMMON_HOME/bin/hadoop jar $HADOOP_MAPRED_HOME/build/hadoop-mapreduce-examples-0.24.0-SNAPSHOT.jar randomwriter -Dmapreduce.job.user.name=$USER -Dmapreduce.clientfactory.class.name=org.apache.hadoop.mapred.YarnClientFactory -Dmapreduce.randomwriter.bytespermap=10000 -Ddfs.blocksize=536870912 -Ddfs.block.size=536870912 -libjars $YARN_HOME/modules/hadoop-mapreduce-client-jobclient-0.24.0-SNAPSHOT.jar output
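The framework switch from Step 8 also has a programmatic equivalent when submitting from Java. The sketch below is illustrative only (the class name, job name, and the elided mapper/reducer/paths are placeholders); normally the setting comes from mapred-site.xml as described above:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.Job;

public class SubmitOnYarn {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Same effect as the mapred-site.xml property from Step 8: route the job
    // to the YARN runtime instead of the classic JobTracker.
    conf.set("mapreduce.framework.name", "yarn");
    Job job = Job.getInstance(conf, "yarn-smoke-test");
    // ... set mapper, reducer, input and output paths here before submitting ...
    System.exit(job.waitForCompletion(true) ? 0 : 1);
  }
}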
diff --git a/hadoop-mapreduce-project/assembly/all.xml b/hadoop-mapreduce-project/assembly/all.xml
index 32c9a799ceb..e69de29bb2d 100644
--- a/hadoop-mapreduce-project/assembly/all.xml
+++ b/hadoop-mapreduce-project/assembly/all.xml
@@ -1,101 +0,0 @@
- [elided: 101-line Maven assembly descriptor for the "all" tar.gz assembly; the recoverable fragments show it packaged the container-executor binary and the bin/ scripts with mode 0755, hadoop-yarn/conf, a filtered project-sources fileset, module jars under modules/, and dependency jars under /lib excluding org.apache.hadoop:hadoop-common and org.apache.hadoop:hadoop-hdfs]
diff --git a/hadoop-mapreduce-project/bin/mapred b/hadoop-mapreduce-project/bin/mapred
index ba7298021fb..e5e9efb4137 100755
--- a/hadoop-mapreduce-project/bin/mapred
+++ b/hadoop-mapreduce-project/bin/mapred
@@ -115,5 +115,12 @@ if [ "$COMMAND" = "classpath" ] ; then
exit
fi
+# Turn on the security logger for the jobtracker
+if [ "$COMMAND" = "jobtracker" ]; then
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,DRFAS}"
+else
+ HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.security.logger=${HADOOP_SECURITY_LOGGER:-INFO,NullAppender}"
+fi
+
export CLASSPATH
exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS $CLASS "$@"
diff --git a/hadoop-mapreduce-project/bin/start-mapred.sh b/hadoop-mapreduce-project/bin/start-mapred.sh
deleted file mode 100755
index d511aacbc2b..00000000000
--- a/hadoop-mapreduce-project/bin/start-mapred.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Start hadoop map reduce daemons. Run this on master node.
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-if [ -e $bin/../libexec/mapred-config.sh ]; then
- . $bin/../libexec/mapred-config.sh
-else
- . "$bin/mapred-config.sh"
-fi
-
-
-# start mapred daemons
-# start jobtracker first to minimize connection errors at startup
-"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/mapred start jobtracker
-"$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/mapred start tasktracker
diff --git a/hadoop-mapreduce-project/bin/stop-mapred.sh b/hadoop-mapreduce-project/bin/stop-mapred.sh
deleted file mode 100755
index 471593eb4b7..00000000000
--- a/hadoop-mapreduce-project/bin/stop-mapred.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-# Stop hadoop map reduce daemons. Run this on master node.
-
-bin=`dirname "${BASH_SOURCE-$0}"`
-bin=`cd "$bin"; pwd`
-
-if [ -e $bin/../libexec/mapred-config.sh ]; then
- . $bin/../libexec/mapred-config.sh
-else
- . "$bin/mapred-config.sh"
-fi
-
-"$HADOOP_PREFIX"/bin/hadoop-daemon.sh --config $HADOOP_CONF_DIR --script "$bin"/mapred stop jobtracker
-"$HADOOP_PREFIX"/bin/hadoop-daemons.sh --config $HADOOP_CONF_DIR --script "$bin"/mapred stop tasktracker
diff --git a/hadoop-mapreduce-project/build.xml b/hadoop-mapreduce-project/build.xml
index 9d78196476c..16f02a3742e 100644
--- a/hadoop-mapreduce-project/build.xml
+++ b/hadoop-mapreduce-project/build.xml
[elided: build.xml hunks whose Ant XML content is unrecoverable; the only readable fragments are the attribute output="${build.dir.eclipse-tools-classes}" and the retained comment "MapReduce. See also the javadoc-dev target."]
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
new file mode 100644
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
+[elided: Apache license header, package declaration org.apache.hadoop.mapreduce.v2.app, imports, and the opening of the class javadoc for the new JobEndNotifier; the javadoc's closing line follows]
+ */
+public class JobEndNotifier implements Configurable {
+ private static final String JOB_ID = "$jobId";
+ private static final String JOB_STATUS = "$jobStatus";
+
+ private Configuration conf;
+ protected String userUrl;
+ protected int numTries; //Number of tries to attempt notification
+ protected int waitInterval; //Time to wait between retrying notification
+ protected URL urlToNotify; //URL to notify read from the config
+
+ /**
+ * Parse the URL that needs to be notified of the end of the job, along
+ * with the number of retries in case of failure and the amount of time to
+ * wait between retries
+ * @param conf the configuration
+ */
+ public void setConf(Configuration conf) {
+ this.conf = conf;
+
+ numTries = Math.min(
+ conf.getInt(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, 0) + 1
+ , conf.getInt(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, 1)
+ );
+ waitInterval = Math.min(
+ conf.getInt(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, 5)
+ , conf.getInt(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, 5)
+ );
+ waitInterval = (waitInterval < 0) ? 5 : waitInterval;
+
+ userUrl = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL);
+ }
+
+ public Configuration getConf() {
+ return conf;
+ }
+
+ /**
+ * Notify the URL just once. Use best effort. Timeout hard coded to 5
+ * seconds.
+ */
+ protected boolean notifyURLOnce() {
+ boolean success = false;
+ try {
+ Log.info("Job end notification trying " + urlToNotify);
+ URLConnection conn = urlToNotify.openConnection();
+ conn.setConnectTimeout(5*1000);
+ conn.setReadTimeout(5*1000);
+ conn.setAllowUserInteraction(false);
+ InputStream is = conn.getInputStream();
+ conn.getContent();
+ is.close();
+ success = true;
+ Log.info("Job end notification to " + urlToNotify + " succeeded");
+ } catch(IOException ioe) {
+ Log.warn("Job end notification to " + urlToNotify + " failed", ioe);
+ }
+ return success;
+ }
+
+ /**
+ * Notify a server of the completion of a submitted job. The notification URL
+ * must be set in the job configuration via MRJobConfig.MR_JOB_END_NOTIFICATION_URL.
+ * @param jobReport JobReport used to read JobId and JobStatus
+ * @throws InterruptedException
+ */
+ public void notify(JobReport jobReport)
+ throws InterruptedException {
+ // Do we need job-end notification?
+ if (userUrl == null) {
+ Log.info("Job end notification URL not set, skipping.");
+ return;
+ }
+
+ //Do string replacements for jobId and jobStatus
+ if (userUrl.contains(JOB_ID)) {
+ userUrl = userUrl.replace(JOB_ID, jobReport.getJobId().toString());
+ }
+ if (userUrl.contains(JOB_STATUS)) {
+ userUrl = userUrl.replace(JOB_STATUS, jobReport.getJobState().toString());
+ }
+
+ // Create the URL, ensure sanity
+ try {
+ urlToNotify = new URL(userUrl);
+ } catch (MalformedURLException mue) {
+ Log.warn("Job end notification couldn't parse " + userUrl, mue);
+ return;
+ }
+
+ // Send notification
+ boolean success = false;
+ while (numTries-- > 0 && !success) {
+ Log.info("Job end notification attempts left " + numTries);
+ success = notifyURLOnce();
+ if (!success) {
+ Thread.sleep(waitInterval);
+ }
+ }
+ if (!success) {
+ Log.warn("Job end notification failed to notify : " + urlToNotify);
+ } else {
+ Log.info("Job end notification succeeded for " + jobReport.getJobId());
+ }
+ }
+}
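For reference, a minimal sketch of how the notifier above could be driven (the URL is illustrative, the helper class is hypothetical, and it is assumed to live in the same package as JobEndNotifier; inside the AM the JobReport comes from Job#getReport(), as the JobFinishEventHandler added to MRAppMaster later in this patch does):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;

class JobEndNotifierSketch {
  static void notifyEnd(JobReport jobReport) throws InterruptedException {
    Configuration conf = new Configuration();
    // $jobId and $jobStatus are substituted by the notifier before the HTTP call.
    conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL,
        "http://workflow.example.com/notify?jobid=$jobId&status=$jobStatus");
    conf.setInt(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, 4); // hard cap on tries
    conf.setInt(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, 3);            // retries after the first try

    JobEndNotifier notifier = new JobEndNotifier();
    notifier.setConf(conf);      // parses the URL, try count and wait interval
    notifier.notify(jobReport);  // retries with sleeps until success or tries run out
  }
}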
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
index be9a3776117..7a6b86a0f80 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java
@@ -23,6 +23,8 @@ import java.io.IOException;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.security.PrivilegedExceptionAction;
+import java.util.LinkedList;
+import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
@@ -31,17 +33,27 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.LocalContainerLauncher;
import org.apache.hadoop.mapred.TaskAttemptListenerImpl;
import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.AMStartedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
@@ -70,17 +82,20 @@ import org.apache.hadoop.mapreduce.v2.app.speculate.Speculator;
import org.apache.hadoop.mapreduce.v2.app.speculate.SpeculatorEvent;
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanerImpl;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.SystemClock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.event.AsyncDispatcher;
import org.apache.hadoop.yarn.event.Dispatcher;
@@ -108,19 +123,27 @@ import org.apache.hadoop.yarn.util.ConverterUtils;
* The information is shared across different components using AppContext.
*/
+@SuppressWarnings("deprecation")
public class MRAppMaster extends CompositeService {
private static final Log LOG = LogFactory.getLog(MRAppMaster.class);
private Clock clock;
- private final long startTime = System.currentTimeMillis();
+ private final long startTime;
+ private final long appSubmitTime;
private String appName;
private final ApplicationAttemptId appAttemptID;
+ private final ContainerId containerID;
+ private final String nmHost;
+ private final int nmPort;
+ private final int nmHttpPort;
protected final MRAppMetrics metrics;
private Set<TaskId> completedTasksFromPreviousRun;
+ private List<AMInfo> amInfos;
private AppContext context;
private Dispatcher dispatcher;
private ClientService clientService;
+ private Recovery recoveryServ;
private ContainerAllocator containerAllocator;
private ContainerLauncher containerLauncher;
private TaskCleaner taskCleaner;
@@ -128,39 +151,83 @@ public class MRAppMaster extends CompositeService {
private TaskAttemptListener taskAttemptListener;
private JobTokenSecretManager jobTokenSecretManager =
new JobTokenSecretManager();
+ private JobId jobId;
+ private boolean newApiCommitter;
+ private OutputCommitter committer;
private JobEventDispatcher jobEventDispatcher;
+ private boolean inRecovery = false;
private Job job;
-
- public MRAppMaster(ApplicationAttemptId applicationAttemptId) {
- this(applicationAttemptId, new SystemClock());
+ private Credentials fsTokens = new Credentials(); // Filled during init
+ private UserGroupInformation currentUser; // Will be setup during init
+
+ public MRAppMaster(ApplicationAttemptId applicationAttemptId,
+ ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
+ long appSubmitTime) {
+ this(applicationAttemptId, containerId, nmHost, nmPort, nmHttpPort,
+ new SystemClock(), appSubmitTime);
}
- public MRAppMaster(ApplicationAttemptId applicationAttemptId, Clock clock) {
+ public MRAppMaster(ApplicationAttemptId applicationAttemptId,
+ ContainerId containerId, String nmHost, int nmPort, int nmHttpPort,
+ Clock clock, long appSubmitTime) {
super(MRAppMaster.class.getName());
this.clock = clock;
+ this.startTime = clock.getTime();
+ this.appSubmitTime = appSubmitTime;
this.appAttemptID = applicationAttemptId;
+ this.containerID = containerId;
+ this.nmHost = nmHost;
+ this.nmPort = nmPort;
+ this.nmHttpPort = nmHttpPort;
this.metrics = MRAppMetrics.create();
LOG.info("Created MRAppMaster for application " + applicationAttemptId);
}
@Override
public void init(final Configuration conf) {
+
+ downloadTokensAndSetupUGI(conf);
+
context = new RunningAppContext(conf);
// Job name is the same as the app name until we support DAG of jobs
// for an app later
appName = conf.get(MRJobConfig.JOB_NAME, "");
- if (conf.getBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, false)
- && appAttemptID.getAttemptId() > 1) {
- LOG.info("Recovery is enabled. Will try to recover from previous life.");
- Recovery recoveryServ = new RecoveryService(appAttemptID, clock);
+ conf.setInt(MRJobConfig.APPLICATION_ATTEMPT_ID, appAttemptID.getAttemptId());
+
+ newApiCommitter = false;
+ jobId = MRBuilderUtils.newJobId(appAttemptID.getApplicationId(),
+ appAttemptID.getApplicationId().getId());
+ int numReduceTasks = conf.getInt(MRJobConfig.NUM_REDUCES, 0);
+ if ((numReduceTasks > 0 &&
+ conf.getBoolean("mapred.reducer.new-api", false)) ||
+ (numReduceTasks == 0 &&
+ conf.getBoolean("mapred.mapper.new-api", false))) {
+ newApiCommitter = true;
+ LOG.info("Using mapred newApiCommitter.");
+ }
+
+ committer = createOutputCommitter(conf);
+ boolean recoveryEnabled = conf.getBoolean(
+ MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
+ boolean recoverySupportedByCommitter = committer.isRecoverySupported();
+ if (recoveryEnabled && recoverySupportedByCommitter
+ && appAttemptID.getAttemptId() > 1) {
+ LOG.info("Recovery is enabled. "
+ + "Will try to recover from previous life on best effort basis.");
+ recoveryServ = new RecoveryService(appAttemptID, clock,
+ committer);
addIfService(recoveryServ);
dispatcher = recoveryServ.getDispatcher();
clock = recoveryServ.getClock();
- completedTasksFromPreviousRun = recoveryServ.getCompletedTasks();
+ inRecovery = true;
} else {
+ LOG.info("Not starting RecoveryService: recoveryEnabled: "
+ + recoveryEnabled + " recoverySupportedByCommitter: "
+ + recoverySupportedByCommitter + " ApplicationAttemptID: "
+ + appAttemptID.getAttemptId());
dispatcher = new AsyncDispatcher();
addIfService(dispatcher);
}
@@ -223,15 +290,165 @@ public class MRAppMaster extends CompositeService {
super.init(conf);
} // end of init()
+ private OutputCommitter createOutputCommitter(Configuration conf) {
+ OutputCommitter committer = null;
+
+ LOG.info("OutputCommitter set in config "
+ + conf.get("mapred.output.committer.class"));
+
+ if (newApiCommitter) {
+ org.apache.hadoop.mapreduce.v2.api.records.TaskId taskID = MRBuilderUtils
+ .newTaskId(jobId, 0, TaskType.MAP);
+ org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = MRBuilderUtils
+ .newTaskAttemptId(taskID, 0);
+ TaskAttemptContext taskContext = new TaskAttemptContextImpl(conf,
+ TypeConverter.fromYarn(attemptID));
+ OutputFormat outputFormat;
+ try {
+ outputFormat = ReflectionUtils.newInstance(taskContext
+ .getOutputFormatClass(), conf);
+ committer = outputFormat.getOutputCommitter(taskContext);
+ } catch (Exception e) {
+ throw new YarnException(e);
+ }
+ } else {
+ committer = ReflectionUtils.newInstance(conf.getClass(
+ "mapred.output.committer.class", FileOutputCommitter.class,
+ org.apache.hadoop.mapred.OutputCommitter.class), conf);
+ }
+ LOG.info("OutputCommitter is " + committer.getClass().getName());
+ return committer;
+ }
+
+ protected boolean keepJobFiles(JobConf conf) {
+ return (conf.getKeepTaskFilesPattern() != null || conf
+ .getKeepFailedTaskFiles());
+ }
+
+ /**
+ * Create the default file System for this job.
+ * @param conf the conf object
+ * @return the default filesystem for this job
+ * @throws IOException
+ */
+ protected FileSystem getFileSystem(Configuration conf) throws IOException {
+ return FileSystem.get(conf);
+ }
+
+ /**
+ * clean up staging directories for the job.
+ * @throws IOException
+ */
+ public void cleanupStagingDir() throws IOException {
+ /* make sure we clean the staging files */
+ String jobTempDir = null;
+ FileSystem fs = getFileSystem(getConfig());
+ try {
+ if (!keepJobFiles(new JobConf(getConfig()))) {
+ jobTempDir = getConfig().get(MRJobConfig.MAPREDUCE_JOB_DIR);
+ if (jobTempDir == null) {
+ LOG.warn("Job Staging directory is null");
+ return;
+ }
+ Path jobTempDirPath = new Path(jobTempDir);
+ LOG.info("Deleting staging directory " + FileSystem.getDefaultUri(getConfig()) +
+ " " + jobTempDir);
+ fs.delete(jobTempDirPath, true);
+ }
+ } catch(IOException io) {
+ LOG.error("Failed to cleanup staging dir " + jobTempDir, io);
+ }
+ }
+
+ /**
+ * Exit call. Just in a function call to enable testing.
+ */
+ protected void sysexit() {
+ System.exit(0);
+ }
+
+ private class JobFinishEventHandler implements EventHandler<JobFinishEvent> {
+ @Override
+ public void handle(JobFinishEvent event) {
+ // job has finished
+ // this is the only job, so shut down the Appmaster
+ // note in a workflow scenario, this may lead to creation of a new
+ // job (FIXME?)
+
+ // TODO: currently just wait for some time so clients can know the
+ // final states. Will be removed once RM comes up.
+ try {
+ Thread.sleep(5000);
+ } catch (InterruptedException e) {
+ e.printStackTrace();
+ }
+ try {
+ // Stop all services
+ // This will also send the final report to the ResourceManager
+ LOG.info("Calling stop for all the services");
+ stop();
+
+ // Send job-end notification
+ try {
+ LOG.info("Job end notification started for jobID : "
+ + job.getReport().getJobId());
+ JobEndNotifier notifier = new JobEndNotifier();
+ notifier.setConf(getConfig());
+ notifier.notify(job.getReport());
+ } catch (InterruptedException ie) {
+ LOG.warn("Job end notification interrupted for jobID : "
+ + job.getReport().getJobId(), ie );
+ }
+ } catch (Throwable t) {
+ LOG.warn("Graceful stop failed ", t);
+ }
+
+ // Cleanup staging directory
+ try {
+ cleanupStagingDir();
+ } catch(IOException io) {
+ LOG.warn("Failed to delete staging dir");
+ }
+
+ //Bring the process down by force.
+ //Not needed after HADOOP-7140
+ LOG.info("Exiting MR AppMaster..GoodBye!");
+ sysexit();
+ }
+ }
+
+ /**
+ * create an event handler that handles the job finish event.
+ * @return the job finish event handler.
+ */
+ protected EventHandler<JobFinishEvent> createJobFinishEventHandler() {
+ return new JobFinishEventHandler();
+ }
+
/** Create and initialize (but don't start) a single job. */
protected Job createJob(Configuration conf) {
- // ////////// Obtain the tokens needed by the job. //////////
- Credentials fsTokens = new Credentials();
- UserGroupInformation currentUser = null;
+ // create single job
+ Job newJob = new JobImpl(jobId, appAttemptID, conf, dispatcher
+ .getEventHandler(), taskAttemptListener, jobTokenSecretManager,
+ fsTokens, clock, completedTasksFromPreviousRun, metrics, committer,
+ newApiCommitter, currentUser.getUserName(), appSubmitTime, amInfos);
+ ((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
+
+ dispatcher.register(JobFinishEvent.Type.class,
+ createJobFinishEventHandler());
+ return newJob;
+ } // end createJob()
+
+
+ /**
+ * Obtain the tokens needed by the job and put them in the UGI
+ * @param conf
+ */
+ protected void downloadTokensAndSetupUGI(Configuration conf) {
try {
- currentUser = UserGroupInformation.getCurrentUser();
+ this.currentUser = UserGroupInformation.getCurrentUser();
if (UserGroupInformation.isSecurityEnabled()) {
// Read the file-system tokens from the localized tokens-file.
@@ -246,56 +463,18 @@ public class MRAppMaster extends CompositeService {
+ jobTokenFile);
for (Token<? extends TokenIdentifier> tk : fsTokens.getAllTokens()) {
- LOG.info(" --- DEBUG: Token of kind " + tk.getKind()
- + "in current ugi in the AppMaster for service "
- + tk.getService());
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Token of kind " + tk.getKind()
+ + "in current ugi in the AppMaster for service "
+ + tk.getService());
+ }
currentUser.addToken(tk); // For use by AppMaster itself.
}
}
} catch (IOException e) {
throw new YarnException(e);
}
- // ////////// End of obtaining the tokens needed by the job. //////////
-
- // create single job
- Job newJob = new JobImpl(appAttemptID, conf, dispatcher.getEventHandler(),
- taskAttemptListener, jobTokenSecretManager, fsTokens, clock,
- completedTasksFromPreviousRun, metrics, currentUser.getUserName());
- ((RunningAppContext) context).jobs.put(newJob.getID(), newJob);
-
- dispatcher.register(JobFinishEvent.Type.class,
- new EventHandler() {
- @Override
- public void handle(JobFinishEvent event) {
- // job has finished
- // this is the only job, so shut down the Appmaster
- // note in a workflow scenario, this may lead to creation of a new
- // job (FIXME?)
-
- // TODO:currently just wait for some time so clients can know the
- // final states. Will be removed once RM come on.
- try {
- Thread.sleep(5000);
- } catch (InterruptedException e) {
- e.printStackTrace();
- }
- LOG.info("Calling stop for all the services");
- try {
- stop();
- } catch (Throwable t) {
- LOG.warn("Graceful stop failed ", t);
- }
- //TODO: this is required because rpc server does not shut down
- // in spite of calling server.stop().
- //Bring the process down by force.
- //Not needed after HADOOP-7140
- LOG.info("Exiting MR AppMaster..GoodBye!");
- System.exit(0);
- }
- });
-
- return newJob;
- } // end createJob()
+ }
protected void addIfService(Object object) {
if (object instanceof Service) {
@@ -373,6 +552,22 @@ public class MRAppMaster extends CompositeService {
return appAttemptID.getApplicationId();
}
+ public ApplicationAttemptId getAttemptID() {
+ return appAttemptID;
+ }
+
+ public JobId getJobId() {
+ return jobId;
+ }
+
+ public OutputCommitter getCommitter() {
+ return committer;
+ }
+
+ public boolean isNewApiCommitter() {
+ return newApiCommitter;
+ }
+
public int getStartCount() {
return appAttemptID.getAttemptId();
}
@@ -389,6 +584,10 @@ public class MRAppMaster extends CompositeService {
return completedTasksFromPreviousRun;
}
+ public List<AMInfo> getAllAMInfos() {
+ return amInfos;
+ }
+
public ContainerAllocator getContainerAllocator() {
return containerAllocator;
}
@@ -522,6 +721,7 @@ public class MRAppMaster extends CompositeService {
return jobs;
}
+ @SuppressWarnings("rawtypes")
@Override
public EventHandler getEventHandler() {
return dispatcher.getEventHandler();
@@ -538,13 +738,39 @@ public class MRAppMaster extends CompositeService {
}
}
+ @SuppressWarnings("unchecked")
@Override
public void start() {
- ///////////////////// Create the job itself.
+ // Pull completedTasks etc from recovery
+ if (inRecovery) {
+ completedTasksFromPreviousRun = recoveryServ.getCompletedTasks();
+ amInfos = recoveryServ.getAMInfos();
+ }
+
+ // / Create the AMInfo for the current AppMaster
+ if (amInfos == null) {
+ amInfos = new LinkedList<AMInfo>();
+ }
+ AMInfo amInfo =
+ MRBuilderUtils.newAMInfo(appAttemptID, startTime, containerID, nmHost,
+ nmPort, nmHttpPort);
+ amInfos.add(amInfo);
+
+ // /////////////////// Create the job itself.
job = createJob(getConfig());
+
// End of creating the job.
+ // Send out an MR AM inited event for this AM and all previous AMs.
+ for (AMInfo info : amInfos) {
+ dispatcher.getEventHandler().handle(
+ new JobHistoryEvent(job.getID(), new AMStartedEvent(info
+ .getAppAttemptId(), info.getStartTime(), info.getContainerId(),
+ info.getNodeManagerHost(), info.getNodeManagerPort(), info
+ .getNodeManagerHttpPort())));
+ }
+
// metrics system init is really init & start.
// It's more test friendly to put it here.
DefaultMetricsSystem.initialize("MRAppMaster");
@@ -590,6 +816,7 @@ public class MRAppMaster extends CompositeService {
* In a typical workflow, one presumably would want to uberize only a subset
* of the jobs (the "small" ones), which is awkward with the current design.
*/
+ @SuppressWarnings("unchecked")
protected void startJobs() {
/** create a job-start event to get this ball rolling */
JobEvent startJobEvent = new JobEvent(job.getID(), JobEventType.JOB_START);
@@ -598,6 +825,7 @@ public class MRAppMaster extends CompositeService {
}
private class JobEventDispatcher implements EventHandler<JobEvent> {
+ @SuppressWarnings("unchecked")
@Override
public void handle(JobEvent event) {
((EventHandler)context.getJob(event.getJobId())).handle(event);
@@ -605,6 +833,7 @@ public class MRAppMaster extends CompositeService {
}
private class TaskEventDispatcher implements EventHandler<TaskEvent> {
+ @SuppressWarnings("unchecked")
@Override
public void handle(TaskEvent event) {
Task task = context.getJob(event.getTaskID().getJobId()).getTask(
@@ -615,6 +844,7 @@ public class MRAppMaster extends CompositeService {
private class TaskAttemptEventDispatcher
implements EventHandler<TaskAttemptEvent> {
+ @SuppressWarnings("unchecked")
@Override
public void handle(TaskAttemptEvent event) {
Job job = context.getJob(event.getTaskAttemptID().getTaskId().getJobId());
@@ -640,19 +870,44 @@ public class MRAppMaster extends CompositeService {
}
}
+ private static void validateInputParam(String value, String param)
+ throws IOException {
+ if (value == null) {
+ String msg = param + " is null";
+ LOG.error(msg);
+ throw new IOException(msg);
+ }
+ }
+
public static void main(String[] args) {
try {
- String applicationAttemptIdStr = System
- .getenv(ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV);
- if (applicationAttemptIdStr == null) {
- String msg = ApplicationConstants.APPLICATION_ATTEMPT_ID_ENV
- + " is null";
- LOG.error(msg);
- throw new IOException(msg);
- }
- ApplicationAttemptId applicationAttemptId = ConverterUtils
- .toApplicationAttemptId(applicationAttemptIdStr);
- MRAppMaster appMaster = new MRAppMaster(applicationAttemptId);
+ String containerIdStr =
+ System.getenv(ApplicationConstants.AM_CONTAINER_ID_ENV);
+ String nodeHostString = System.getenv(ApplicationConstants.NM_HOST_ENV);
+ String nodePortString = System.getenv(ApplicationConstants.NM_PORT_ENV);
+ String nodeHttpPortString =
+ System.getenv(ApplicationConstants.NM_HTTP_PORT_ENV);
+ String appSubmitTimeStr =
+ System.getenv(ApplicationConstants.APP_SUBMIT_TIME_ENV);
+
+ validateInputParam(containerIdStr,
+ ApplicationConstants.AM_CONTAINER_ID_ENV);
+ validateInputParam(nodeHostString, ApplicationConstants.NM_HOST_ENV);
+ validateInputParam(nodePortString, ApplicationConstants.NM_PORT_ENV);
+ validateInputParam(nodeHttpPortString,
+ ApplicationConstants.NM_HTTP_PORT_ENV);
+ validateInputParam(appSubmitTimeStr,
+ ApplicationConstants.APP_SUBMIT_TIME_ENV);
+
+ ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
+ ApplicationAttemptId applicationAttemptId =
+ containerId.getApplicationAttemptId();
+ long appSubmitTime = Long.parseLong(appSubmitTimeStr);
+
+ MRAppMaster appMaster =
+ new MRAppMaster(applicationAttemptId, containerId, nodeHostString,
+ Integer.parseInt(nodePortString),
+ Integer.parseInt(nodeHttpPortString), appSubmitTime);
Runtime.getRuntime().addShutdownHook(
new CompositeServiceShutdownHook(appMaster));
YarnConfiguration conf = new YarnConfiguration(new JobConf());
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
index 10cb4e29adf..b94e4122a81 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRClientSecurityInfo.java
@@ -27,7 +27,7 @@ import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.token.TokenInfo;
import org.apache.hadoop.security.token.TokenSelector;
import org.apache.hadoop.yarn.proto.MRClientProtocol;
-import org.apache.hadoop.yarn.security.ApplicationTokenSelector;
+import org.apache.hadoop.yarn.security.client.ClientTokenSelector;
public class MRClientSecurityInfo extends SecurityInfo {
@@ -51,7 +51,7 @@ public class MRClientSecurityInfo extends SecurityInfo {
@Override
public Class<? extends TokenSelector<? extends TokenIdentifier>>
value() {
- return ApplicationTokenSelector.class;
+ return ClientTokenSelector.class;
}
};
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
index 9df88d633cf..b5e5cd37b24 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskAttemptListener.java
@@ -24,12 +24,35 @@ import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.WrappedJvmID;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+/**
+ * This interface listens for changes to the state of a Task.
+ */
public interface TaskAttemptListener {
InetSocketAddress getAddress();
- void register(TaskAttemptId attemptID, Task task, WrappedJvmID jvmID);
+ /**
+ * Register a JVM with the listener. This should be called as soon as a
+ * JVM ID is assigned to a task attempt, before it has been launched.
+ * @param jvmID the ID of the JVM.
+ */
+ void registerPendingTask(WrappedJvmID jvmID);
+
+ /**
+ * Register the task and task attempt with the JVM. This should be called
+ * when the JVM has been launched.
+ * @param attemptID the id of the attempt for this JVM.
+ * @param task the task itself for this JVM.
+ * @param jvmID the id of the JVM handling the task.
+ */
+ void registerLaunchedTask(TaskAttemptId attemptID, Task task, WrappedJvmID jvmID);
+ /**
+ * Unregister the JVM and the attempt associated with it. This should be
+ * called when the attempt/JVM has finished executing and is being cleaned up.
+ * @param attemptID the ID of the attempt.
+ * @param jvmID the ID of the JVM for that attempt.
+ */
void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID);
}
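To make the intended call order concrete, a hedged sketch of how a caller would drive this interface (the helper class is illustrative and is assumed to live in the same package as TaskAttemptListener; in the AM the listener is TaskAttemptListenerImpl and the arguments come from the container-launch path):

import org.apache.hadoop.mapred.Task;
import org.apache.hadoop.mapred.WrappedJvmID;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;

class TaskAttemptListenerLifecycle {
  static void run(TaskAttemptListener listener, TaskAttemptId attemptId,
      Task task, WrappedJvmID jvmId) {
    listener.registerPendingTask(jvmId);                    // JVM ID assigned, container not launched yet
    listener.registerLaunchedTask(attemptId, task, jvmId);  // container/JVM launched with this attempt
    // ... the task runs and reports progress over the umbilical ...
    listener.unregister(attemptId, jvmId);                  // attempt/JVM finished, clean up
  }
}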
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
index 2218c889c38..ccd03459e2d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/TaskHeartbeatHandler.java
@@ -71,6 +71,7 @@ public class TaskHeartbeatHandler extends AbstractService {
@Override
public void start() {
lostTaskCheckerThread = new Thread(new PingChecker());
+ lostTaskCheckerThread.setName("TaskHeartbeatHandler PingChecker");
lostTaskCheckerThread.start();
super.start();
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
index faf11a117cc..0c27d23dc77 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/MRClientService.java
@@ -18,20 +18,18 @@
package org.apache.hadoop.mapreduce.v2.app.client;
-import java.io.IOException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.UnknownHostException;
-import java.security.AccessControlException;
import java.util.Arrays;
import java.util.Collection;
-import org.apache.avro.ipc.Server;
import org.apache.commons.codec.binary.Base64;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.FailTaskAttemptRequest;
@@ -72,21 +70,20 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.security.authorize.MRAMPolicyProvider;
import org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.ApplicationTokenIdentifier;
-import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
import org.apache.hadoop.yarn.security.client.ClientToAMSecretManager;
+import org.apache.hadoop.yarn.security.client.ClientTokenIdentifier;
import org.apache.hadoop.yarn.service.AbstractService;
import org.apache.hadoop.yarn.webapp.WebApp;
import org.apache.hadoop.yarn.webapp.WebApps;
@@ -131,8 +128,8 @@ public class MRClientService extends AbstractService
System
.getenv(ApplicationConstants.APPLICATION_CLIENT_SECRET_ENV_NAME);
byte[] bytes = Base64.decodeBase64(secretKeyStr);
- ApplicationTokenIdentifier identifier =
- new ApplicationTokenIdentifier(this.appContext.getApplicationID());
+ ClientTokenIdentifier identifier = new ClientTokenIdentifier(
+ this.appContext.getApplicationID());
secretManager.setMasterKey(identifier, bytes);
}
server =
@@ -140,6 +137,14 @@ public class MRClientService extends AbstractService
conf, secretManager,
conf.getInt(MRJobConfig.MR_AM_JOB_CLIENT_THREAD_COUNT,
MRJobConfig.DEFAULT_MR_AM_JOB_CLIENT_THREAD_COUNT));
+
+ // Enable service authorization?
+ if (conf.getBoolean(
+ CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHORIZATION,
+ false)) {
+ refreshServiceAcls(conf, new MRAMPolicyProvider());
+ }
+
server.start();
this.bindAddress =
NetUtils.createSocketAddr(hostNameResolved.getHostAddress()
@@ -154,8 +159,13 @@ public class MRClientService extends AbstractService
super.start();
}
+ void refreshServiceAcls(Configuration configuration,
+ PolicyProvider policyProvider) {
+ this.server.refreshServiceAcl(configuration, policyProvider);
+ }
+
public void stop() {
- server.close();
+ server.stop();
if (webApp != null) {
webApp.stop();
}
@@ -183,13 +193,6 @@ public class MRClientService extends AbstractService
if (job == null) {
throw RPCUtil.getRemoteException("Unknown job " + jobID);
}
- //TODO fix job acls.
- //JobACL operation = JobACL.VIEW_JOB;
- //if (modifyAccess) {
- // operation = JobACL.MODIFY_JOB;
- //}
- //TO disable check access ofr now.
- //checkAccess(job, operation);
return job;
}
@@ -213,24 +216,6 @@ public class MRClientService extends AbstractService
return attempt;
}
- private void checkAccess(Job job, JobACL jobOperation)
- throws YarnRemoteException {
- if (!UserGroupInformation.isSecurityEnabled()) {
- return;
- }
- UserGroupInformation callerUGI;
- try {
- callerUGI = UserGroupInformation.getCurrentUser();
- } catch (IOException e) {
- throw RPCUtil.getRemoteException(e);
- }
- if(!job.checkAccess(callerUGI, jobOperation)) {
- throw RPCUtil.getRemoteException(new AccessControlException("User "
- + callerUGI.getShortUserName() + " cannot perform operation "
- + jobOperation.name() + " on " + job.getID()));
- }
- }
-
@Override
public GetCountersResponse getCounters(GetCountersRequest request)
throws YarnRemoteException {
@@ -291,6 +276,7 @@ public class MRClientService extends AbstractService
return response;
}
+ @SuppressWarnings("unchecked")
@Override
public KillJobResponse killJob(KillJobRequest request)
throws YarnRemoteException {
@@ -307,6 +293,7 @@ public class MRClientService extends AbstractService
return response;
}
+ @SuppressWarnings("unchecked")
@Override
public KillTaskResponse killTask(KillTaskRequest request)
throws YarnRemoteException {
@@ -321,6 +308,7 @@ public class MRClientService extends AbstractService
return response;
}
+ @SuppressWarnings("unchecked")
@Override
public KillTaskAttemptResponse killTaskAttempt(
KillTaskAttemptRequest request) throws YarnRemoteException {
@@ -350,6 +338,7 @@ public class MRClientService extends AbstractService
return response;
}
+ @SuppressWarnings("unchecked")
@Override
public FailTaskAttemptResponse failTaskAttempt(
FailTaskAttemptRequest request) throws YarnRemoteException {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/package-info.java
new file mode 100644
index 00000000000..38980c36986
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/client/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.client;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
index 658f2cb877d..fdba78d9b3f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/Job.java
@@ -23,6 +23,7 @@ import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -68,5 +69,10 @@ public interface Job {
TaskAttemptCompletionEvent[]
getTaskAttemptCompletionEvents(int fromEventId, int maxEvents);
+ /**
+ * @return information for MR AppMasters (previously failed and current)
+ */
+  List<AMInfo> getAMInfos();
+
boolean checkAccess(UserGroupInformation callerUGI, JobACL jobOperation);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
index f6cf83de98a..cc7449524e0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/TaskAttempt.java
@@ -58,6 +58,11 @@ public interface TaskAttempt {
* @return node's http address if a container is assigned, otherwise null.
*/
String getNodeHttpAddress();
+
+ /**
+ * @return node's rack name if a container is assigned, otherwise null.
+ */
+ String getNodeRackName();
/**
* @return time at which container is launched. If container is not launched
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java
index dc121922e71..0f69fa8c206 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptContainerAssignedEvent.java
@@ -18,22 +18,29 @@
package org.apache.hadoop.mapreduce.v2.app.job.event;
+import java.util.Map;
+
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.Container;
-
-
public class TaskAttemptContainerAssignedEvent extends TaskAttemptEvent {
private final Container container;
+  private final Map<ApplicationAccessType, String> applicationACLs;
public TaskAttemptContainerAssignedEvent(TaskAttemptId id,
- Container container) {
+      Container container, Map<ApplicationAccessType, String> applicationACLs) {
super(id, TaskAttemptEventType.TA_ASSIGNED);
this.container = container;
+ this.applicationACLs = applicationACLs;
}
public Container getContainer() {
return this.container;
}
+
+  public Map<ApplicationAccessType, String> getApplicationACLs() {
+ return this.applicationACLs;
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
index c3645355d84..6a9b78af43d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/TaskAttemptStatusUpdateEvent.java
@@ -48,7 +48,6 @@ public class TaskAttemptStatusUpdateEvent extends TaskAttemptEvent {
public TaskAttemptId id;
public float progress;
public Counters counters;
- public String diagnosticInfo;
public String stateString;
public Phase phase;
public long outputSize;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/package-info.java
new file mode 100644
index 00000000000..5684b0528c2
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/event/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.job.event;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
index 4a47462a2d7..ea8bbdf5d6b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/JobImpl.java
@@ -39,15 +39,12 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.FileOutputCommitter;
import org.apache.hadoop.mapred.JobACLsManager;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.OutputCommitter;
-import org.apache.hadoop.mapreduce.OutputFormat;
-import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
@@ -63,7 +60,7 @@ import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.mapreduce.task.JobContextImpl;
-import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Counter;
import org.apache.hadoop.mapreduce.v2.api.records.CounterGroup;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
@@ -97,14 +94,11 @@ import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.event.EventHandler;
-import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
import org.apache.hadoop.yarn.state.MultipleArcTransition;
@@ -125,21 +119,21 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
// Maximum no. of fetch-failure notifications after which map task is failed
private static final int MAX_FETCH_FAILURES_NOTIFICATIONS = 3;
-
- private final RecordFactory recordFactory =
- RecordFactoryProvider.getRecordFactory(null);
//final fields
private final ApplicationAttemptId applicationAttemptId;
private final Clock clock;
private final JobACLsManager aclsManager;
private final String username;
+ private final OutputCommitter committer;
  private final Map<JobACL, AccessControlList> jobACLs;
  private final Set<TaskId> completedTasksFromPreviousRun;
+  private final List<AMInfo> amInfos;
private final Lock readLock;
private final Lock writeLock;
private final JobId jobId;
private final String jobName;
+ private final boolean newApiCommitter;
private final org.apache.hadoop.mapreduce.JobID oldJobId;
private final TaskAttemptListener taskAttemptListener;
private final Object tasksSyncHandle = new Object();
@@ -148,6 +142,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private final EventHandler eventHandler;
private final MRAppMetrics metrics;
private final String userName;
+ private final long appSubmitTime;
private boolean lazyTasksCopyNeeded = false;
  private volatile Map<TaskId, Task> tasks = new LinkedHashMap<TaskId, Task>();
@@ -164,7 +159,6 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private Path remoteJobSubmitDir;
public Path remoteJobConfFile;
private JobContext jobContext;
- private OutputCommitter committer;
private int allowedMapFailuresPercent = 0;
private int allowedReduceFailuresPercent = 0;
  private List<TaskAttemptCompletionEvent> taskAttemptCompletionEvents;
@@ -339,7 +333,6 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
JobEventType.JOB_DIAGNOSTIC_UPDATE,
JobEventType.JOB_TASK_ATTEMPT_FETCH_FAILURE,
JobEventType.INTERNAL_ERROR))
-
// create the topology tables
.installTopology();
@@ -355,7 +348,6 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
private int failedReduceTaskCount = 0;
private int killedMapTaskCount = 0;
private int killedReduceTaskCount = 0;
- private long submitTime;
private long startTime;
private long finishTime;
private float setupProgress;
@@ -366,29 +358,27 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
  private Token<JobTokenIdentifier> jobToken;
private JobTokenSecretManager jobTokenSecretManager;
- public JobImpl(ApplicationAttemptId applicationAttemptId, Configuration conf,
- EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
+ public JobImpl(JobId jobId, ApplicationAttemptId applicationAttemptId,
+ Configuration conf, EventHandler eventHandler,
+ TaskAttemptListener taskAttemptListener,
JobTokenSecretManager jobTokenSecretManager,
- Credentials fsTokenCredentials, Clock clock,
+ Credentials fsTokenCredentials, Clock clock,
      Set<TaskId> completedTasksFromPreviousRun, MRAppMetrics metrics,
- String userName) {
+ OutputCommitter committer, boolean newApiCommitter, String userName,
+      long appSubmitTime, List<AMInfo> amInfos) {
this.applicationAttemptId = applicationAttemptId;
- this.jobId = recordFactory.newRecordInstance(JobId.class);
+ this.jobId = jobId;
this.jobName = conf.get(JobContext.JOB_NAME, "");
this.conf = conf;
this.metrics = metrics;
this.clock = clock;
this.completedTasksFromPreviousRun = completedTasksFromPreviousRun;
+ this.amInfos = amInfos;
this.userName = userName;
- ApplicationId applicationId = applicationAttemptId.getApplicationId();
- jobId.setAppId(applicationId);
- jobId.setId(applicationId.getId());
- oldJobId = TypeConverter.fromYarn(jobId);
- LOG.info("Job created" +
- " appId=" + applicationId +
- " jobId=" + jobId +
- " oldJobId=" + oldJobId);
-
+ this.appSubmitTime = appSubmitTime;
+ this.oldJobId = TypeConverter.fromYarn(jobId);
+ this.newApiCommitter = newApiCommitter;
+
this.taskAttemptListener = taskAttemptListener;
this.eventHandler = eventHandler;
ReadWriteLock readWriteLock = new ReentrantReadWriteLock();
@@ -397,6 +387,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
this.fsTokens = fsTokenCredentials;
this.jobTokenSecretManager = jobTokenSecretManager;
+ this.committer = committer;
this.aclsManager = new JobACLsManager(conf);
this.username = System.getProperty("user.name");
@@ -589,13 +580,14 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
if (getState() == JobState.NEW) {
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
- startTime, finishTime, setupProgress, 0.0f,
- 0.0f, cleanupProgress, remoteJobConfFile.toString());
+ appSubmitTime, startTime, finishTime, setupProgress, 0.0f, 0.0f,
+ cleanupProgress, remoteJobConfFile.toString(), amInfos);
}
return MRBuilderUtils.newJobReport(jobId, jobName, username, state,
- startTime, finishTime, setupProgress, computeProgress(mapTasks),
- computeProgress(reduceTasks), cleanupProgress, remoteJobConfFile.toString());
+ appSubmitTime, startTime, finishTime, setupProgress,
+ computeProgress(mapTasks), computeProgress(reduceTasks),
+ cleanupProgress, remoteJobConfFile.toString(), amInfos);
} finally {
readLock.unlock();
}
@@ -724,6 +716,16 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
this.getEventHandler().handle(new JobHistoryEvent(this.jobId, jfe));
}
+ /**
+   * Create the default file system for this job.
+ * @param conf the conf object
+ * @return the default filesystem for this job
+ * @throws IOException
+ */
+ protected FileSystem getFileSystem(Configuration conf) throws IOException {
+ return FileSystem.get(conf);
+ }
+
static JobState checkJobCompleteSuccess(JobImpl job) {
// check for Job success
if (job.completedTaskCount == job.getTasks().size()) {
@@ -733,7 +735,6 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
} catch (IOException e) {
LOG.warn("Could not do commit for Job", e);
}
-
job.logJobHistoryFinishedEvent();
return job.finished(JobState.SUCCEEDED);
}
@@ -798,6 +799,11 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
public Map getJobACLs() {
return Collections.unmodifiableMap(jobACLs);
}
+
+ @Override
+  public List<AMInfo> getAMInfos() {
+ return amInfos;
+ }
public static class InitTransition
      implements MultipleArcTransition<JobImpl, JobEvent, JobState> {
@@ -811,18 +817,17 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
*/
@Override
public JobState transition(JobImpl job, JobEvent event) {
- job.submitTime = job.clock.getTime();
job.metrics.submittedJob(job);
job.metrics.preparingJob(job);
try {
setup(job);
- job.fs = FileSystem.get(job.conf);
+ job.fs = job.getFileSystem(job.conf);
//log to job history
JobSubmittedEvent jse = new JobSubmittedEvent(job.oldJobId,
job.conf.get(MRJobConfig.JOB_NAME, "test"),
job.conf.get(MRJobConfig.USER_NAME, "mapred"),
- job.submitTime,
+ job.appSubmitTime,
job.remoteJobConfFile.toString(),
job.jobACLs, job.conf.get(MRJobConfig.QUEUE_NAME, "default"));
job.eventHandler.handle(new JobHistoryEvent(job.jobId, jse));
@@ -838,60 +843,30 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
checkTaskLimits();
-
- boolean newApiCommitter = false;
- if ((job.numReduceTasks > 0 &&
- job.conf.getBoolean("mapred.reducer.new-api", false)) ||
- (job.numReduceTasks == 0 &&
- job.conf.getBoolean("mapred.mapper.new-api", false))) {
- newApiCommitter = true;
- LOG.info("Using mapred newApiCommitter.");
- }
-
- LOG.info("OutputCommitter set in config " + job.conf.get("mapred.output.committer.class"));
-
- if (newApiCommitter) {
+ if (job.newApiCommitter) {
job.jobContext = new JobContextImpl(job.conf,
job.oldJobId);
- org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId attemptID = RecordFactoryProvider
- .getRecordFactory(null)
- .newRecordInstance(
- org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId.class);
- attemptID.setTaskId(RecordFactoryProvider.getRecordFactory(null)
- .newRecordInstance(TaskId.class));
- attemptID.getTaskId().setJobId(job.jobId);
- attemptID.getTaskId().setTaskType(TaskType.MAP);
- TaskAttemptContext taskContext = new TaskAttemptContextImpl(job.conf,
- TypeConverter.fromYarn(attemptID));
- try {
- OutputFormat outputFormat = ReflectionUtils.newInstance(
- taskContext.getOutputFormatClass(), job.conf);
- job.committer = outputFormat.getOutputCommitter(taskContext);
- } catch(Exception e) {
- throw new IOException("Failed to assign outputcommitter", e);
- }
} else {
job.jobContext = new org.apache.hadoop.mapred.JobContextImpl(
new JobConf(job.conf), job.oldJobId);
- job.committer = ReflectionUtils.newInstance(
- job.conf.getClass("mapred.output.committer.class", FileOutputCommitter.class,
- org.apache.hadoop.mapred.OutputCommitter.class), job.conf);
}
- LOG.info("OutputCommitter is " + job.committer.getClass().getName());
long inputLength = 0;
for (int i = 0; i < job.numMapTasks; ++i) {
inputLength += taskSplitMetaInfo[i].getInputDataLength();
}
-//FIXME: need new memory criterion for uber-decision (oops, too late here; until AM-resizing supported, must depend on job client to pass fat-slot needs)
+ //FIXME: need new memory criterion for uber-decision (oops, too late here;
+ // until AM-resizing supported, must depend on job client to pass fat-slot needs)
// these are no longer "system" settings, necessarily; user may override
int sysMaxMaps = job.conf.getInt(MRJobConfig.JOB_UBERTASK_MAXMAPS, 9);
int sysMaxReduces =
job.conf.getInt(MRJobConfig.JOB_UBERTASK_MAXREDUCES, 1);
long sysMaxBytes = job.conf.getLong(MRJobConfig.JOB_UBERTASK_MAXBYTES,
- job.conf.getLong("dfs.block.size", 64*1024*1024)); //FIXME: this is wrong; get FS from [File?]InputFormat and default block size from that
- //long sysMemSizeForUberSlot = JobTracker.getMemSizeForReduceSlot(); // FIXME [could use default AM-container memory size...]
+ job.conf.getLong("dfs.block.size", 64*1024*1024)); //FIXME: this is
+ // wrong; get FS from [File?]InputFormat and default block size from that
+ //long sysMemSizeForUberSlot = JobTracker.getMemSizeForReduceSlot();
+ // FIXME [could use default AM-container memory size...]
boolean uberEnabled =
job.conf.getBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
@@ -900,8 +875,8 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
boolean smallInput = (inputLength <= sysMaxBytes);
boolean smallMemory = true; //FIXME (see above)
// ignoring overhead due to UberTask and statics as negligible here:
-// FIXME && (Math.max(memoryPerMap, memoryPerReduce) <= sysMemSizeForUberSlot
-// || sysMemSizeForUberSlot == JobConf.DISABLED_MEMORY_LIMIT)
+ // FIXME && (Math.max(memoryPerMap, memoryPerReduce) <= sysMemSizeForUberSlot
+ // || sysMemSizeForUberSlot == JobConf.DISABLED_MEMORY_LIMIT)
boolean notChainJob = !isChainJob(job.conf);
// User has overall veto power over uberization, or user can modify
@@ -935,7 +910,9 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
job.conf.setInt(MRJobConfig.REDUCE_MAX_ATTEMPTS, 1);
// disable speculation: makes no sense to speculate an entire job
-// canSpeculateMaps = canSpeculateReduces = false; // [TODO: in old version, ultimately was from conf.getMapSpeculativeExecution(), conf.getReduceSpeculativeExecution()]
+ //canSpeculateMaps = canSpeculateReduces = false; // [TODO: in old
+ //version, ultimately was from conf.getMapSpeculativeExecution(),
+ //conf.getReduceSpeculativeExecution()]
} else {
StringBuilder msg = new StringBuilder();
msg.append("Not uberizing ").append(job.jobId).append(" because:");
@@ -1022,13 +999,6 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
if (UserGroupInformation.isSecurityEnabled()) {
tokenStorage.addAll(job.fsTokens);
}
-
- Path remoteJobTokenFile =
- new Path(job.remoteJobSubmitDir,
- MRJobConfig.APPLICATION_TOKENS_FILE);
- tokenStorage.writeTokenStorageFile(remoteJobTokenFile, job.conf);
- LOG.info("Writing back the job-token file on the remote file system:"
- + remoteJobTokenFile.toString());
}
/**
@@ -1138,7 +1108,7 @@ public class JobImpl implements org.apache.hadoop.mapreduce.v2.app.job.Job,
job.isUber()); //Will transition to state running. Currently in INITED
job.eventHandler.handle(new JobHistoryEvent(job.jobId, jie));
JobInfoChangeEvent jice = new JobInfoChangeEvent(job.oldJobId,
- job.submitTime, job.startTime);
+ job.appSubmitTime, job.startTime);
job.eventHandler.handle(new JobHistoryEvent(job.jobId, jice));
job.metrics.runningJob(job);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
index 495d00e22c9..713d17b83cc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskAttemptImpl.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.mapreduce.v2.app.job.impl;
-import java.io.File;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.nio.ByteBuffer;
@@ -53,7 +52,6 @@ import org.apache.hadoop.mapreduce.OutputCommitter;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TaskCounter;
import org.apache.hadoop.mapreduce.TypeConverter;
-import org.apache.hadoop.mapreduce.filecache.DistributedCache;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.MapAttemptFinishedEvent;
import org.apache.hadoop.mapreduce.jobhistory.ReduceAttemptFinishedEvent;
@@ -98,16 +96,18 @@ import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.YarnException;
-import org.apache.hadoop.yarn.api.ApplicationConstants;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerToken;
import org.apache.hadoop.yarn.api.records.LocalResource;
import org.apache.hadoop.yarn.api.records.LocalResourceType;
import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
+import org.apache.hadoop.yarn.api.records.NodeId;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
@@ -116,9 +116,12 @@ import org.apache.hadoop.yarn.state.InvalidStateTransitonException;
import org.apache.hadoop.yarn.state.SingleArcTransition;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.Apps;
+import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.hadoop.yarn.util.RackResolver;
+
/**
* Implementation of TaskAttempt interface.
*/
@@ -156,6 +159,8 @@ public abstract class TaskAttemptImpl implements
private long finishTime;
private WrappedProgressSplitsBlock progressSplitBlock;
private int shufflePort = -1;
+ private String trackerName;
+ private int httpPort;
private static final CleanupContainerTransition CLEANUP_CONTAINER_TRANSITION =
new CleanupContainerTransition();
@@ -421,9 +426,10 @@ public abstract class TaskAttemptImpl implements
stateMachine;
private ContainerId containerID;
- private String nodeHostName;
+ private NodeId containerNodeId;
private String containerMgrAddress;
private String nodeHttpAddress;
+ private String nodeRackName;
private WrappedJvmID jvmID;
private ContainerToken containerToken;
private Resource assignedCapability;
@@ -434,6 +440,9 @@ public abstract class TaskAttemptImpl implements
//this is the last status reported by the REMOTE running attempt
private TaskAttemptStatus reportedStatus;
+
+ private static final String LINE_SEPARATOR = System
+ .getProperty("line.separator");
public TaskAttemptImpl(TaskId taskId, int i,
@SuppressWarnings("rawtypes") EventHandler eventHandler,
@@ -526,8 +535,10 @@ public abstract class TaskAttemptImpl implements
/**
* Create the {@link ContainerLaunchContext} for this attempt.
+ * @param applicationACLs
*/
- private ContainerLaunchContext createContainerLaunchContext() {
+ private ContainerLaunchContext createContainerLaunchContext(
+      Map<ApplicationAccessType, String> applicationACLs) {
// Application resources
    Map<String, LocalResource> localResources =
@@ -611,7 +622,7 @@ public abstract class TaskAttemptImpl implements
serviceData.put(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID,
ShuffleHandler.serializeServiceData(jobToken));
- MRApps.addToEnvironment(
+ Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
getInitialClasspath());
@@ -628,17 +639,11 @@ public abstract class TaskAttemptImpl implements
jvmID);
// Construct the actual Container
- ContainerLaunchContext container =
- recordFactory.newRecordInstance(ContainerLaunchContext.class);
- container.setContainerId(containerID);
- container.setUser(conf.get(MRJobConfig.USER_NAME));
- container.setResource(assignedCapability);
- container.setLocalResources(localResources);
- container.setEnvironment(environment);
- container.setCommands(commands);
- container.setServiceData(serviceData);
- container.setContainerTokens(tokens);
-
+ ContainerLaunchContext container = BuilderUtils
+ .newContainerLaunchContext(containerID, conf
+ .get(MRJobConfig.USER_NAME), assignedCapability, localResources,
+ environment, commands, serviceData, tokens, applicationACLs);
+
return container;
}
@@ -723,6 +728,19 @@ public abstract class TaskAttemptImpl implements
readLock.unlock();
}
}
+
+ /**
+   * If a container is assigned, return the node's rack name; otherwise null.
+ */
+ @Override
+ public String getNodeRackName() {
+ this.readLock.lock();
+ try {
+ return this.nodeRackName;
+ } finally {
+ this.readLock.unlock();
+ }
+ }
protected abstract org.apache.hadoop.mapred.Task createRemoteTask();
@@ -758,10 +776,16 @@ public abstract class TaskAttemptImpl implements
result.setStartTime(launchTime);
result.setFinishTime(finishTime);
result.setShuffleFinishTime(this.reportedStatus.shuffleFinishTime);
- result.setDiagnosticInfo(reportedStatus.diagnosticInfo);
+ result.setDiagnosticInfo(StringUtils.join(LINE_SEPARATOR, getDiagnostics()));
result.setPhase(reportedStatus.phase);
result.setStateString(reportedStatus.stateString);
result.setCounters(getCounters());
+ result.setContainerId(this.getAssignedContainerID());
+ result.setNodeManagerHost(trackerName);
+ result.setNodeManagerHttpPort(httpPort);
+ if (this.containerNodeId != null) {
+ result.setNodeManagerPort(this.containerNodeId.getPort());
+ }
return result;
} finally {
readLock.unlock();
@@ -855,7 +879,7 @@ public abstract class TaskAttemptImpl implements
private static long computeSlotMillis(TaskAttemptImpl taskAttempt) {
TaskType taskType = taskAttempt.getID().getTaskId().getTaskType();
int slotMemoryReq =
- taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
+ taskAttempt.getMemoryRequired(taskAttempt.conf, taskType);
int simSlotsRequired =
slotMemoryReq
/ (taskType == TaskType.MAP ? MAP_MEMORY_MB_DEFAULT
@@ -888,15 +912,20 @@ public abstract class TaskAttemptImpl implements
return jce;
}
- private static TaskAttemptUnsuccessfulCompletionEvent createTaskAttemptUnsuccessfulCompletionEvent(
- TaskAttemptImpl taskAttempt, TaskAttemptState attemptState) {
- TaskAttemptUnsuccessfulCompletionEvent tauce = new TaskAttemptUnsuccessfulCompletionEvent(
- TypeConverter.fromYarn(taskAttempt.attemptId),
- TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId().getTaskType()),
- attemptState.toString(), taskAttempt.finishTime,
- taskAttempt.nodeHostName == null ? "UNKNOWN" : taskAttempt.nodeHostName,
- taskAttempt.reportedStatus.diagnosticInfo.toString(),
- taskAttempt.getProgressSplitBlock().burst());
+ private static
+ TaskAttemptUnsuccessfulCompletionEvent
+ createTaskAttemptUnsuccessfulCompletionEvent(TaskAttemptImpl taskAttempt,
+ TaskAttemptState attemptState) {
+ TaskAttemptUnsuccessfulCompletionEvent tauce =
+ new TaskAttemptUnsuccessfulCompletionEvent(
+ TypeConverter.fromYarn(taskAttempt.attemptId),
+ TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId()
+ .getTaskType()), attemptState.toString(),
+ taskAttempt.finishTime,
+ taskAttempt.containerMgrAddress == null ? "UNKNOWN"
+ : taskAttempt.containerMgrAddress, StringUtils.join(
+ LINE_SEPARATOR, taskAttempt.getDiagnostics()), taskAttempt
+ .getProgressSplitBlock().burst());
return tauce;
}
@@ -988,17 +1017,19 @@ public abstract class TaskAttemptImpl implements
private static class ContainerAssignedTransition implements
      SingleArcTransition<TaskAttemptImpl, TaskAttemptEvent> {
- @SuppressWarnings("unchecked")
+ @SuppressWarnings({ "unchecked", "deprecation" })
@Override
public void transition(final TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
- TaskAttemptContainerAssignedEvent cEvent =
+ final TaskAttemptContainerAssignedEvent cEvent =
(TaskAttemptContainerAssignedEvent) event;
taskAttempt.containerID = cEvent.getContainer().getId();
- taskAttempt.nodeHostName = cEvent.getContainer().getNodeId().getHost();
- taskAttempt.containerMgrAddress = cEvent.getContainer().getNodeId()
+ taskAttempt.containerNodeId = cEvent.getContainer().getNodeId();
+ taskAttempt.containerMgrAddress = taskAttempt.containerNodeId
.toString();
taskAttempt.nodeHttpAddress = cEvent.getContainer().getNodeHttpAddress();
+ taskAttempt.nodeRackName = RackResolver.resolve(
+ taskAttempt.containerNodeId.getHost()).getNetworkLocation();
taskAttempt.containerToken = cEvent.getContainer().getContainerToken();
taskAttempt.assignedCapability = cEvent.getContainer().getResource();
// this is a _real_ Task (classic Hadoop mapred flavor):
@@ -1006,6 +1037,7 @@ public abstract class TaskAttemptImpl implements
taskAttempt.jvmID = new WrappedJvmID(
taskAttempt.remoteTask.getTaskID().getJobID(),
taskAttempt.remoteTask.isMapTask(), taskAttempt.containerID.getId());
+ taskAttempt.taskAttemptListener.registerPendingTask(taskAttempt.jvmID);
//launch the container
//create the container object to be launched for a given Task attempt
@@ -1015,7 +1047,8 @@ public abstract class TaskAttemptImpl implements
taskAttempt.containerMgrAddress, taskAttempt.containerToken) {
@Override
public ContainerLaunchContext getContainer() {
- return taskAttempt.createContainerLaunchContext();
+ return taskAttempt.createContainerLaunchContext(cEvent
+ .getApplicationACLs());
}
@Override
public Task getRemoteTask() { // classic mapred Task, not YARN version
@@ -1095,14 +1128,18 @@ public abstract class TaskAttemptImpl implements
//set the launch time
taskAttempt.launchTime = taskAttempt.clock.getTime();
+ taskAttempt.shufflePort = event.getShufflePort();
+
// register it to TaskAttemptListener so that it start listening
// for it
- taskAttempt.taskAttemptListener.register(
+ taskAttempt.taskAttemptListener.registerLaunchedTask(
taskAttempt.attemptId, taskAttempt.remoteTask, taskAttempt.jvmID);
//TODO Resolve to host / IP in case of a local address.
InetSocketAddress nodeHttpInetAddr =
NetUtils.createSocketAddr(taskAttempt.nodeHttpAddress); // TODO:
// Costly?
+ taskAttempt.trackerName = nodeHttpInetAddr.getHostName();
+ taskAttempt.httpPort = nodeHttpInetAddr.getPort();
JobCounterUpdateEvent jce =
new JobCounterUpdateEvent(taskAttempt.attemptId.getTaskId()
.getJobId());
@@ -1112,11 +1149,15 @@ public abstract class TaskAttemptImpl implements
, 1);
taskAttempt.eventHandler.handle(jce);
+ LOG.info("TaskAttempt: [" + taskAttempt.attemptId
+ + "] using containerId: [" + taskAttempt.containerID + " on NM: ["
+ + taskAttempt.containerMgrAddress + "]");
TaskAttemptStartedEvent tase =
new TaskAttemptStartedEvent(TypeConverter.fromYarn(taskAttempt.attemptId),
TypeConverter.fromYarn(taskAttempt.attemptId.getTaskId().getTaskType()),
taskAttempt.launchTime,
- nodeHttpInetAddr.getHostName(), nodeHttpInetAddr.getPort());
+ nodeHttpInetAddr.getHostName(), nodeHttpInetAddr.getPort(),
+ taskAttempt.shufflePort, taskAttempt.containerID);
taskAttempt.eventHandler.handle
(new JobHistoryEvent(taskAttempt.attemptId.getTaskId().getJobId(), tase));
taskAttempt.eventHandler.handle
@@ -1125,7 +1166,6 @@ public abstract class TaskAttemptImpl implements
//make remoteTask reference as null as it is no more needed
//and free up the memory
taskAttempt.remoteTask = null;
- taskAttempt.shufflePort = event.getShufflePort();
//tell the Task that attempt has started
taskAttempt.eventHandler.handle(new TaskTAttemptEvent(
@@ -1152,6 +1192,7 @@ public abstract class TaskAttemptImpl implements
@Override
public void transition(TaskAttemptImpl taskAttempt,
TaskAttemptEvent event) {
+ @SuppressWarnings("deprecation")
TaskAttemptContext taskContext =
new TaskAttemptContextImpl(new JobConf(taskAttempt.conf),
TypeConverter.fromYarn(taskAttempt.attemptId));
@@ -1229,7 +1270,10 @@ public abstract class TaskAttemptImpl implements
TypeConverter.fromYarn(attemptId.getTaskId().getTaskType()),
state.toString(),
this.reportedStatus.mapFinishTime,
- finishTime, this.nodeHostName == null ? "UNKNOWN" : this.nodeHostName,
+ finishTime,
+ this.containerNodeId == null ? "UNKNOWN"
+ : this.containerNodeId.getHost(),
+ this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
this.reportedStatus.stateString,
TypeConverter.fromYarn(getCounters()),
getProgressSplitBlock().burst());
@@ -1242,7 +1286,10 @@ public abstract class TaskAttemptImpl implements
state.toString(),
this.reportedStatus.shuffleFinishTime,
this.reportedStatus.sortFinishTime,
- finishTime, this.containerMgrAddress == null ? "UNKNOWN" : this.containerMgrAddress,
+ finishTime,
+ this.containerNodeId == null ? "UNKNOWN"
+ : this.containerNodeId.getHost(),
+ this.nodeRackName == null ? "UNKNOWN" : this.nodeRackName,
this.reportedStatus.stateString,
TypeConverter.fromYarn(getCounters()),
getProgressSplitBlock().burst());
@@ -1352,8 +1399,6 @@ public abstract class TaskAttemptImpl implements
(new SpeculatorEvent
(taskAttempt.reportedStatus, taskAttempt.clock.getTime()));
- //add to diagnostic
- taskAttempt.addDiagnosticInfo(newReportedStatus.diagnosticInfo);
taskAttempt.updateProgressSplits();
//if fetch failures are present, send the fetch failure event to job
@@ -1381,7 +1426,6 @@ public abstract class TaskAttemptImpl implements
private void initTaskAttemptStatus(TaskAttemptStatus result) {
result.progress = 0.0f;
- result.diagnosticInfo = "";
result.phase = Phase.STARTING;
result.stateString = "NEW";
result.taskState = TaskAttemptState.NEW;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
index 71f8823e687..a7c64915124 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TaskImpl.java
@@ -441,10 +441,20 @@ public abstract class TaskImpl implements Task, EventHandler {
float progress = 0f;
TaskAttempt result = null;
for (TaskAttempt at : attempts.values()) {
+ switch (at.getState()) {
+
+ // ignore all failed task attempts
+ case FAIL_CONTAINER_CLEANUP:
+ case FAIL_TASK_CLEANUP:
+ case FAILED:
+ case KILL_CONTAINER_CLEANUP:
+ case KILL_TASK_CLEANUP:
+ case KILLED:
+ continue;
+ }
if (result == null) {
result = at; //The first time around
}
- //TODO: consider the nextAttemptNumber only if it is not failed/killed ?
// calculate the best progress
if (at.getProgress() > progress) {
result = at;
@@ -496,7 +506,7 @@ public abstract class TaskImpl implements Task, EventHandler {
break;
case 1:
- Map newAttempts
+ Map newAttempts
= new LinkedHashMap(maxAttempts);
newAttempts.putAll(attempts);
attempts = newAttempts;
@@ -558,7 +568,8 @@ public abstract class TaskImpl implements Task, EventHandler {
//raise the completion event only if the container is assigned
// to nextAttemptNumber
if (attempt.getNodeHttpAddress() != null) {
- TaskAttemptCompletionEvent tce = recordFactory.newRecordInstance(TaskAttemptCompletionEvent.class);
+ TaskAttemptCompletionEvent tce = recordFactory
+ .newRecordInstance(TaskAttemptCompletionEvent.class);
tce.setEventId(-1);
tce.setMapOutputServerAddress("http://"
+ attempt.getNodeHttpAddress().split(":")[0] + ":"
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/package-info.java
new file mode 100644
index 00000000000..9fb5cedf4a1
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/impl/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/package-info.java
new file mode 100644
index 00000000000..ae4331cbf86
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/job/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.job;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
index cc41db1bc41..12ac363875b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncher.java
@@ -19,6 +19,7 @@
package org.apache.hadoop.mapreduce.v2.app.launcher;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.yarn.event.EventHandler;
public interface ContainerLauncher
@@ -28,4 +29,12 @@ public interface ContainerLauncher
CONTAINER_REMOTE_LAUNCH,
CONTAINER_REMOTE_CLEANUP
}
+
+ // Not a documented config. Only used for tests
+ static final String MR_AM_NM_COMMAND_TIMEOUT = MRJobConfig.MR_AM_PREFIX
+ + "nm-command-timeout";
+ /**
+ * Maximum of 1 minute timeout for a Node to react to the command
+ */
+ static final int DEFAULT_NM__COMMAND_TIMEOUT = 60000;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
index 6e7996b6da0..083ed58d9d3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/ContainerLauncherImpl.java
@@ -21,10 +21,15 @@ package org.apache.hadoop.mapreduce.v2.app.launcher;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.security.PrivilegedAction;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.Map;
+import java.util.Set;
+import java.util.Timer;
+import java.util.TimerTask;
import java.util.concurrent.BlockingQueue;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -43,7 +48,6 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.StringUtils;
@@ -55,21 +59,23 @@ import org.apache.hadoop.yarn.api.protocolrecords.StopContainerRequest;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
import org.apache.hadoop.yarn.api.records.ContainerToken;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.ContainerManagerSecurityInfo;
import org.apache.hadoop.yarn.security.ContainerTokenIdentifier;
import org.apache.hadoop.yarn.service.AbstractService;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
/**
* This class is responsible for launching of containers.
*/
public class ContainerLauncherImpl extends AbstractService implements
ContainerLauncher {
- private static final Log LOG = LogFactory.getLog(ContainerLauncherImpl.class);
+ static final Log LOG = LogFactory.getLog(ContainerLauncherImpl.class);
+
+ int nmTimeOut;
private AppContext context;
private ThreadPoolExecutor launcherPool;
@@ -79,10 +85,14 @@ public class ContainerLauncherImpl extends AbstractService implements
  private BlockingQueue<ContainerLauncherEvent> eventQueue =
      new LinkedBlockingQueue<ContainerLauncherEvent>();
private RecordFactory recordFactory;
- //have a cache/map of UGIs so as to avoid creating too many RPC
- //client connection objects to the same NodeManager
- private ConcurrentMap ugiMap =
- new ConcurrentHashMap();
+
+ // To track numNodes.
+  Set<String> allNodes = new HashSet<String>();
+
+ // have a cache/map of proxies so as to avoid creating multiple RPC
+ // client connection objects for the same container.
+  private Map<ContainerId, ContainerManager> clientCache
+      = new HashMap<ContainerId, ContainerManager>();
public ContainerLauncherImpl(AppContext context) {
super(ContainerLauncherImpl.class.getName());
@@ -95,14 +105,21 @@ public class ContainerLauncherImpl extends AbstractService implements
this.limitOnPoolSize = conf.getInt(
MRJobConfig.MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT,
MRJobConfig.DEFAULT_MR_AM_CONTAINERLAUNCHER_THREAD_COUNT_LIMIT);
+ this.nmTimeOut = conf.getInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT,
+ ContainerLauncher.DEFAULT_NM__COMMAND_TIMEOUT);
super.init(conf);
}
public void start() {
+
+ ThreadFactory tf = new ThreadFactoryBuilder().setNameFormat(
+ "ContainerLauncher #%d").setDaemon(true).build();
+
// Start with a default core-pool size of 10 and change it dynamically.
launcherPool = new ThreadPoolExecutor(INITIAL_POOL_SIZE,
Integer.MAX_VALUE, 1, TimeUnit.HOURS,
-        new LinkedBlockingQueue<Runnable>());
+        new LinkedBlockingQueue<Runnable>(),
+ tf);
eventHandlingThread = new Thread(new Runnable() {
@Override
public void run() {
@@ -123,14 +140,17 @@ public class ContainerLauncherImpl extends AbstractService implements
// nodes where containers will run at *this* point of time. This is
// *not* the cluster size and doesn't need to be.
- int numNodes = ugiMap.size();
+ int numNodes = allNodes.size();
int idealPoolSize = Math.min(limitOnPoolSize, numNodes);
if (poolSize <= idealPoolSize) {
// Bump up the pool size to idealPoolSize+INITIAL_POOL_SIZE, the
// later is just a buffer so we are not always increasing the
// pool-size
- launcherPool.setCorePoolSize(idealPoolSize + INITIAL_POOL_SIZE);
+ int newPoolSize = idealPoolSize + INITIAL_POOL_SIZE;
+ LOG.info("Setting ContainerLauncher pool size to "
+ + newPoolSize);
+ launcherPool.setCorePoolSize(newPoolSize);
}
}
@@ -143,13 +163,14 @@ public class ContainerLauncherImpl extends AbstractService implements
}
}
});
+ eventHandlingThread.setName("ContainerLauncher Event Handler");
eventHandlingThread.start();
super.start();
}
public void stop() {
eventHandlingThread.interrupt();
- launcherPool.shutdown();
+ launcherPool.shutdownNow();
super.stop();
}
@@ -159,31 +180,57 @@ public class ContainerLauncherImpl extends AbstractService implements
UserGroupInformation user = UserGroupInformation.getCurrentUser();
- if (UserGroupInformation.isSecurityEnabled()) {
+ synchronized (this.clientCache) {
-      Token<ContainerTokenIdentifier> token = new Token<ContainerTokenIdentifier>(
- containerToken.getIdentifier().array(), containerToken
- .getPassword().array(), new Text(containerToken.getKind()),
- new Text(containerToken.getService()));
- // the user in createRemoteUser in this context is not important
- UserGroupInformation ugi = UserGroupInformation
- .createRemoteUser(containerManagerBindAddr);
- ugi.addToken(token);
- ugiMap.putIfAbsent(containerManagerBindAddr, ugi);
+ if (this.clientCache.containsKey(containerID)) {
+ return this.clientCache.get(containerID);
+ }
- user = ugiMap.get(containerManagerBindAddr);
+ this.allNodes.add(containerManagerBindAddr);
+
+ if (UserGroupInformation.isSecurityEnabled()) {
+        Token<ContainerTokenIdentifier> token = new Token<ContainerTokenIdentifier>(
+ containerToken.getIdentifier().array(), containerToken
+ .getPassword().array(), new Text(containerToken.getKind()),
+ new Text(containerToken.getService()));
+ // the user in createRemoteUser in this context has to be ContainerID
+ user = UserGroupInformation.createRemoteUser(containerID.toString());
+ user.addToken(token);
+ }
+
+ ContainerManager proxy = user
+          .doAs(new PrivilegedAction<ContainerManager>() {
+ @Override
+ public ContainerManager run() {
+ YarnRPC rpc = YarnRPC.create(getConfig());
+ return (ContainerManager) rpc.getProxy(ContainerManager.class,
+ NetUtils.createSocketAddr(containerManagerBindAddr),
+ getConfig());
+ }
+ });
+ this.clientCache.put(containerID, proxy);
+ return proxy;
+ }
+ }
+
+ private static class CommandTimer extends TimerTask {
+ private final Thread commandThread;
+ protected final ContainerLauncherEvent event;
+ protected final String message;
+
+ public CommandTimer(Thread thread, ContainerLauncherEvent event) {
+ this.commandThread = thread;
+ this.event = event;
+ this.message = "Couldn't complete " + event.getType() + " on "
+ + event.getContainerID() + "/" + event.getTaskAttemptID()
+ + ". Interrupting and returning";
+ }
+
+ @Override
+ public void run() {
+ LOG.warn(this.message);
+ this.commandThread.interrupt();
}
- ContainerManager proxy =
-        user.doAs(new PrivilegedAction<ContainerManager>() {
- @Override
- public ContainerManager run() {
- YarnRPC rpc = YarnRPC.create(getConfig());
- return (ContainerManager) rpc.getProxy(ContainerManager.class,
- NetUtils.createSocketAddr(containerManagerBindAddr),
- getConfig());
- }
- });
- return proxy;
}
/**
@@ -206,27 +253,53 @@ public class ContainerLauncherImpl extends AbstractService implements
final String containerManagerBindAddr = event.getContainerMgrAddress();
ContainerId containerID = event.getContainerID();
ContainerToken containerToken = event.getContainerToken();
+ TaskAttemptId taskAttemptID = event.getTaskAttemptID();
+
+ Timer timer = new Timer(true);
switch(event.getType()) {
case CONTAINER_REMOTE_LAUNCH:
- ContainerRemoteLaunchEvent launchEv = (ContainerRemoteLaunchEvent) event;
+ ContainerRemoteLaunchEvent launchEvent
+ = (ContainerRemoteLaunchEvent) event;
- TaskAttemptId taskAttemptID = launchEv.getTaskAttemptID();
try {
-
- ContainerManager proxy =
- getCMProxy(containerID, containerManagerBindAddr, containerToken);
-
+ timer.schedule(new CommandTimer(Thread.currentThread(), event),
+ nmTimeOut);
+
+ ContainerManager proxy = getCMProxy(containerID,
+ containerManagerBindAddr, containerToken);
+
+        // Interrupted during getProxy, but that didn't throw an exception
+ if (Thread.currentThread().isInterrupted()) {
+          // The timer cancelled the command in the meantime.
+ String message = "Start-container for " + event.getContainerID()
+ + " got interrupted. Returning.";
+ sendContainerLaunchFailedMsg(taskAttemptID, message);
+ return;
+ }
+
// Construct the actual Container
ContainerLaunchContext containerLaunchContext =
- launchEv.getContainer();
+ launchEvent.getContainer();
// Now launch the actual container
StartContainerRequest startRequest = recordFactory
.newRecordInstance(StartContainerRequest.class);
startRequest.setContainerLaunchContext(containerLaunchContext);
StartContainerResponse response = proxy.startContainer(startRequest);
+
+ // container started properly. Stop the timer
+ timer.cancel();
+ if (Thread.currentThread().isInterrupted()) {
+        // The timer cancelled the command in the meantime, but
+        // startContainer didn't throw an exception
+ String message = "Start-container for " + event.getContainerID()
+ + " got interrupted. Returning.";
+ sendContainerLaunchFailedMsg(taskAttemptID, message);
+ return;
+ }
+
ByteBuffer portInfo = response
.getServiceResponse(ShuffleHandler.MAPREDUCE_SHUFFLE_SERVICEID);
int port = -1;
@@ -248,12 +321,9 @@ public class ContainerLauncherImpl extends AbstractService implements
} catch (Throwable t) {
String message = "Container launch failed for " + containerID
+ " : " + StringUtils.stringifyException(t);
- LOG.error(message);
- context.getEventHandler().handle(
- new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
- context.getEventHandler().handle(
- new TaskAttemptEvent(taskAttemptID,
- TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
+ sendContainerLaunchFailedMsg(taskAttemptID, message);
+ } finally {
+ timer.cancel();
}
break;
@@ -265,24 +335,44 @@ public class ContainerLauncherImpl extends AbstractService implements
eventQueue.remove(event); // TODO: Any synchro needed?
//deallocate the container
context.getEventHandler().handle(
- new ContainerAllocatorEvent(event.getTaskAttemptID(),
- ContainerAllocator.EventType.CONTAINER_DEALLOCATE));
+ new ContainerAllocatorEvent(taskAttemptID,
+ ContainerAllocator.EventType.CONTAINER_DEALLOCATE));
} else {
+
try {
- ContainerManager proxy =
- getCMProxy(containerID, containerManagerBindAddr, containerToken);
- // TODO:check whether container is launched
+ timer.schedule(new CommandTimer(Thread.currentThread(), event),
+ nmTimeOut);
- // kill the remote container if already launched
- StopContainerRequest stopRequest = recordFactory
- .newRecordInstance(StopContainerRequest.class);
- stopRequest.setContainerId(event.getContainerID());
- proxy.stopContainer(stopRequest);
+ ContainerManager proxy = getCMProxy(containerID,
+ containerManagerBindAddr, containerToken);
+ if (Thread.currentThread().isInterrupted()) {
+          // The timer cancelled the command in the meantime. No need to
+          // return; send the cleaned-up event anyway.
+ LOG.info("Stop-container for " + event.getContainerID()
+ + " got interrupted.");
+ } else {
+
+ // TODO:check whether container is launched
+
+ // kill the remote container if already launched
+ StopContainerRequest stopRequest = recordFactory
+ .newRecordInstance(StopContainerRequest.class);
+ stopRequest.setContainerId(event.getContainerID());
+ proxy.stopContainer(stopRequest);
+ }
} catch (Throwable t) {
- //ignore the cleanup failure
- LOG.warn("cleanup failed for container " + event.getContainerID() ,
- t);
+ // ignore the cleanup failure
+ String message = "cleanup failed for container "
+ + event.getContainerID() + " : "
+ + StringUtils.stringifyException(t);
+ context.getEventHandler()
+ .handle(
+ new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID,
+ message));
+ LOG.warn(message);
+ } finally {
+ timer.cancel();
}
// after killing, send killed event to taskattempt
@@ -293,7 +383,17 @@ public class ContainerLauncherImpl extends AbstractService implements
break;
}
}
-
+ }
+
+ @SuppressWarnings("unchecked")
+ void sendContainerLaunchFailedMsg(TaskAttemptId taskAttemptID,
+ String message) {
+ LOG.error(message);
+ context.getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(taskAttemptID, message));
+ context.getEventHandler().handle(
+ new TaskAttemptEvent(taskAttemptID,
+ TaskAttemptEventType.TA_CONTAINER_LAUNCH_FAILED));
}
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/package-info.java
new file mode 100644
index 00000000000..3d95dbfce93
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/launcher/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.launcher;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
index 0261e18b56f..f0ce272bb8f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/LocalContainerAllocator.java
@@ -23,19 +23,23 @@ import java.util.concurrent.atomic.AtomicInteger;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.event.JobCounterUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMCommunicator;
+import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateRequest;
import org.apache.hadoop.yarn.api.protocolrecords.AllocateResponse;
import org.apache.hadoop.yarn.api.records.AMResponse;
-import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.NodeId;
@@ -57,8 +61,10 @@ public class LocalContainerAllocator extends RMCommunicator
LogFactory.getLog(LocalContainerAllocator.class);
private final EventHandler eventHandler;
- private final ApplicationId appID;
+// private final ApplicationId appID;
private AtomicInteger containerCount = new AtomicInteger();
+ private long retryInterval;
+ private long retrystartTime;
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
@@ -67,7 +73,19 @@ public class LocalContainerAllocator extends RMCommunicator
AppContext context) {
super(clientService, context);
this.eventHandler = context.getEventHandler();
- this.appID = context.getApplicationID();
+// this.appID = context.getApplicationID();
+
+ }
+
+ @Override
+ public void init(Configuration conf) {
+ super.init(conf);
+ retryInterval =
+ getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
+ MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
+ // Init startTime to current time. If all goes well, it will be reset after
+ // first attempt to contact RM.
+ retrystartTime = System.currentTimeMillis();
}
@Override
@@ -77,10 +95,32 @@ public class LocalContainerAllocator extends RMCommunicator
          .getApplicationProgress(), new ArrayList<ResourceRequest>(),
          new ArrayList<ContainerId>());
AllocateResponse allocateResponse = scheduler.allocate(allocateRequest);
- AMResponse response = allocateResponse.getAMResponse();
+ AMResponse response;
+ try {
+ response = allocateResponse.getAMResponse();
+ // Reset retry count if no exception occurred.
+ retrystartTime = System.currentTimeMillis();
+ } catch (Exception e) {
+ // This can happen when the connection to the RM has gone down. Keep
+ // re-trying until the retryInterval has expired.
+ if (System.currentTimeMillis() - retrystartTime >= retryInterval) {
+ eventHandler.handle(new JobEvent(this.getJob().getID(),
+ JobEventType.INTERNAL_ERROR));
+ throw new YarnException("Could not contact RM after " +
+ retryInterval + " milliseconds.");
+ }
+ // Throw this up to the caller, which may decide to ignore it and
+ // continue to attempt to contact the RM.
+ throw e;
+ }
if (response.getReboot()) {
- // TODO
LOG.info("Event from RM: shutting down Application Master");
+ // This can happen if the RM has been restarted. If it is in that state,
+ // this application must clean itself up.
+ eventHandler.handle(new JobEvent(this.getJob().getID(),
+ JobEventType.INTERNAL_ERROR));
+ throw new YarnException("Resource Manager doesn't recognize AttemptId: " +
+ this.getContext().getApplicationID());
}
}
@@ -112,7 +152,7 @@ public class LocalContainerAllocator extends RMCommunicator
eventHandler.handle(jce);
}
eventHandler.handle(new TaskAttemptContainerAssignedEvent(
- event.getAttemptID(), container));
+ event.getAttemptID(), container, applicationACLs));
}
}
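
Note: the heartbeat above now tolerates a temporarily unreachable RM: each failed
call is rethrown to the caller, and the AM only aborts once
MR_AM_TO_RM_WAIT_INTERVAL_MS has elapsed since the last successful contact, with the
window reset on every success. A rough, self-contained sketch of that retry-window
idea (RetryWindow and its names are hypothetical, not part of the patch):

  import java.util.concurrent.Callable;

  public class RetryWindow {
    private final long retryIntervalMs;
    private long retryStartTime;

    public RetryWindow(long retryIntervalMs) {
      this.retryIntervalMs = retryIntervalMs;
      // Init to current time; reset after the first successful call.
      this.retryStartTime = System.currentTimeMillis();
    }

    /** Invoke the remote call; give up only after retryIntervalMs of continuous failure. */
    public <T> T call(Callable<T> remote) throws Exception {
      try {
        T result = remote.call();
        retryStartTime = System.currentTimeMillis(); // success resets the window
        return result;
      } catch (Exception e) {
        if (System.currentTimeMillis() - retryStartTime >= retryIntervalMs) {
          throw new RuntimeException("Could not contact RM after "
              + retryIntervalMs + " milliseconds.", e);
        }
        // Rethrow so the caller can ignore it and retry on the next heartbeat.
        throw e;
      }
    }
  }
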
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/package-info.java
new file mode 100644
index 00000000000..ca4de03c43e
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/local/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.local;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/package-info.java
new file mode 100644
index 00000000000..1eb45430194
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/metrics/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.metrics;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/package-info.java
new file mode 100644
index 00000000000..eb912e5faa6
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java
index 8005714389a..95c4919d224 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/Recovery.java
@@ -18,8 +18,10 @@
package org.apache.hadoop.mapreduce.v2.app.recover;
+import java.util.List;
import java.util.Set;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.event.Dispatcher;
@@ -31,4 +33,6 @@ public interface Recovery {
Clock getClock();
  Set<TaskId> getCompletedTasks();
+
+  List<AMInfo> getAMInfos();
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
index ca213f17f86..843e666c873 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/RecoveryService.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.mapreduce.v2.app.recover;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
@@ -31,16 +32,23 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerLaunchedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
@@ -51,12 +59,14 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherEvent;
import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerRemoteLaunchEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocator;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerAllocatorEvent;
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleaner;
import org.apache.hadoop.mapreduce.v2.app.taskclean.TaskCleanupEvent;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.yarn.Clock;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.Container;
@@ -70,6 +80,8 @@ import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.service.CompositeService;
import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.apache.hadoop.yarn.util.ConverterUtils;
/*
* Recovers the completed tasks from the previous life of Application Master.
@@ -82,9 +94,6 @@ import org.apache.hadoop.yarn.service.Service;
//TODO:
//task cleanup for all non completed tasks
-//change job output committer to have
-// - atomic job output promotion
-// - recover output of completed tasks
public class RecoveryService extends CompositeService implements Recovery {
@@ -93,6 +102,7 @@ public class RecoveryService extends CompositeService implements Recovery {
private static final Log LOG = LogFactory.getLog(RecoveryService.class);
private final ApplicationAttemptId applicationAttemptId;
+ private final OutputCommitter committer;
private final Dispatcher dispatcher;
private final ControlledClock clock;
@@ -106,9 +116,10 @@ public class RecoveryService extends CompositeService implements Recovery {
private volatile boolean recoveryMode = false;
public RecoveryService(ApplicationAttemptId applicationAttemptId,
- Clock clock) {
+ Clock clock, OutputCommitter committer) {
super("RecoveringDispatcher");
this.applicationAttemptId = applicationAttemptId;
+ this.committer = committer;
this.dispatcher = new RecoveryDispatcher();
this.clock = new ControlledClock(clock);
addService((Service) dispatcher);
@@ -120,17 +131,17 @@ public class RecoveryService extends CompositeService implements Recovery {
// parse the history file
try {
parse();
- if (completedTasks.size() > 0) {
- recoveryMode = true;
- LOG.info("SETTING THE RECOVERY MODE TO TRUE. NO OF COMPLETED TASKS " +
- "TO RECOVER " + completedTasks.size());
- LOG.info("Job launch time " + jobInfo.getLaunchTime());
- clock.setTime(jobInfo.getLaunchTime());
- }
- } catch (IOException e) {
+ } catch (Exception e) {
LOG.warn(e);
LOG.warn("Could not parse the old history file. Aborting recovery. "
- + "Starting afresh.");
+ + "Starting afresh.", e);
+ }
+ if (completedTasks.size() > 0) {
+ recoveryMode = true;
+ LOG.info("SETTING THE RECOVERY MODE TO TRUE. NO OF COMPLETED TASKS "
+ + "TO RECOVER " + completedTasks.size());
+ LOG.info("Job launch time " + jobInfo.getLaunchTime());
+ clock.setTime(jobInfo.getLaunchTime());
}
}
@@ -149,6 +160,25 @@ public class RecoveryService extends CompositeService implements Recovery {
return completedTasks.keySet();
}
+ @Override
+  public List<AMInfo> getAMInfos() {
+    if (jobInfo == null || jobInfo.getAMInfos() == null) {
+      return new LinkedList<AMInfo>();
+    }
+    List<AMInfo> amInfos = new LinkedList<AMInfo>();
+ for (org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.AMInfo jhAmInfo : jobInfo
+ .getAMInfos()) {
+ AMInfo amInfo =
+ MRBuilderUtils.newAMInfo(jhAmInfo.getAppAttemptId(),
+ jhAmInfo.getStartTime(), jhAmInfo.getContainerId(),
+ jhAmInfo.getNodeManagerHost(), jhAmInfo.getNodeManagerPort(),
+ jhAmInfo.getNodeManagerHttpPort());
+
+ amInfos.add(amInfo);
+ }
+ return amInfos;
+ }
+
private void parse() throws IOException {
// TODO: parse history file based on startCount
String jobName =
@@ -297,14 +327,28 @@ public class RecoveryService extends CompositeService implements Recovery {
TaskAttemptId aId = ((ContainerRemoteLaunchEvent) event)
.getTaskAttemptID();
TaskAttemptInfo attInfo = getTaskAttemptInfo(aId);
- //TODO need to get the real port number MAPREDUCE-2666
- actualHandler.handle(new TaskAttemptContainerLaunchedEvent(aId, -1));
+ actualHandler.handle(new TaskAttemptContainerLaunchedEvent(aId,
+ attInfo.getShufflePort()));
// send the status update event
sendStatusUpdateEvent(aId, attInfo);
TaskAttemptState state = TaskAttemptState.valueOf(attInfo.getTaskStatus());
switch (state) {
case SUCCEEDED:
+ //recover the task output
+ TaskAttemptContext taskContext = new TaskAttemptContextImpl(getConfig(),
+ attInfo.getAttemptId());
+ try {
+ committer.recoverTask(taskContext);
+ } catch (IOException e) {
+ actualHandler.handle(new JobDiagnosticsUpdateEvent(
+ aId.getTaskId().getJobId(), "Error in recovering task output " +
+ e.getMessage()));
+ actualHandler.handle(new JobEvent(aId.getTaskId().getJobId(),
+ JobEventType.INTERNAL_ERROR));
+ }
+ LOG.info("Recovered output from task attempt " + attInfo.getAttemptId());
+
// send the done event
LOG.info("Sending done event to " + aId);
actualHandler.handle(new TaskAttemptEvent(aId,
@@ -324,6 +368,16 @@ public class RecoveryService extends CompositeService implements Recovery {
return;
}
+ else if (event.getType() ==
+ ContainerLauncher.EventType.CONTAINER_REMOTE_CLEANUP) {
+ TaskAttemptId aId = ((ContainerLauncherEvent) event)
+ .getTaskAttemptID();
+ actualHandler.handle(
+ new TaskAttemptEvent(aId,
+ TaskAttemptEventType.TA_CONTAINER_CLEANED));
+ return;
+ }
+
// delegate to the actual handler
actualHandler.handle(event);
}
@@ -334,7 +388,6 @@ public class RecoveryService extends CompositeService implements Recovery {
TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
taskAttemptStatus.id = yarnAttemptID;
taskAttemptStatus.progress = 1.0f;
- taskAttemptStatus.diagnosticInfo = "";
taskAttemptStatus.stateString = attemptInfo.getTaskStatus();
// taskAttemptStatus.outputSize = attemptInfo.getOutputSize();
taskAttemptStatus.phase = Phase.CLEANUP;
@@ -352,18 +405,17 @@ public class RecoveryService extends CompositeService implements Recovery {
private void sendAssignedEvent(TaskAttemptId yarnAttemptID,
TaskAttemptInfo attemptInfo) {
LOG.info("Sending assigned event to " + yarnAttemptID);
- ContainerId cId = recordFactory
- .newRecordInstance(ContainerId.class);
- Container container = recordFactory
- .newRecordInstance(Container.class);
- container.setId(cId);
- container.setNodeId(recordFactory
- .newRecordInstance(NodeId.class));
- container.setContainerToken(null);
- container.setNodeHttpAddress(attemptInfo.getHostname() + ":" +
- attemptInfo.getHttpPort());
+ ContainerId cId = attemptInfo.getContainerId();
+
+ NodeId nodeId = ConverterUtils.toNodeId(attemptInfo.getHostname());
+ // Resource/Priority/ApplicationACLs are only needed while launching the
+ // container on an NM, these are already completed tasks, so setting them
+ // to null
+ Container container = BuilderUtils.newContainer(cId, nodeId,
+ attemptInfo.getTrackerName() + ":" + attemptInfo.getHttpPort(),
+ null, null, null);
actualHandler.handle(new TaskAttemptContainerAssignedEvent(yarnAttemptID,
- container));
+ container, null));
}
}
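
Note: during recovery the service above now replays every SUCCEEDED attempt through
OutputCommitter.recoverTask and fails the job if recovery of an attempt's output
throws. A simplified sketch of that recover-or-fail loop, using a hypothetical
Committer interface and plain strings in place of the real OutputCommitter and
attempt IDs:

  import java.io.IOException;
  import java.util.List;

  public class OutputRecovery {

    /** Hypothetical stand-in for OutputCommitter.recoverTask(TaskAttemptContext). */
    interface Committer {
      void recoverTask(String attemptId) throws IOException;
    }

    /** Recover each succeeded attempt; report a diagnostic and stop on failure. */
    static void recoverAll(List<String> succeededAttempts, Committer committer) {
      for (String attemptId : succeededAttempts) {
        try {
          committer.recoverTask(attemptId);
          System.out.println("Recovered output from task attempt " + attemptId);
        } catch (IOException e) {
          System.err.println("Error in recovering task output " + e.getMessage());
          // The real service raises a JobEventType.INTERNAL_ERROR event here.
          return;
        }
      }
    }
  }
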
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/package-info.java
new file mode 100644
index 00000000000..400fdfaea63
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/recover/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.recover;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
index 91d60c20817..68d9c2462b4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMCommunicator.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.rm;
import java.io.IOException;
import java.security.PrivilegedAction;
+import java.util.Map;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -35,7 +36,6 @@ import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -47,6 +47,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterReque
import org.apache.hadoop.yarn.api.protocolrecords.RegisterApplicationMasterResponse;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ApplicationAccessType;
import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
@@ -54,7 +55,6 @@ import org.apache.hadoop.yarn.event.EventHandler;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.ipc.YarnRPC;
-import org.apache.hadoop.yarn.security.SchedulerSecurityInfo;
import org.apache.hadoop.yarn.service.AbstractService;
/**
@@ -73,6 +73,7 @@ public abstract class RMCommunicator extends AbstractService {
protected int lastResponseID;
private Resource minContainerCapability;
private Resource maxContainerCapability;
+  protected Map<ApplicationAccessType, String> applicationACLs;
private final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
@@ -160,6 +161,7 @@ public abstract class RMCommunicator extends AbstractService {
scheduler.registerApplicationMaster(request);
minContainerCapability = response.getMinimumResourceCapability();
maxContainerCapability = response.getMaximumResourceCapability();
+ this.applicationACLs = response.getApplicationACLs();
LOG.info("minContainerCapability: " + minContainerCapability.getMemory());
LOG.info("maxContainerCapability: " + maxContainerCapability.getMemory());
} catch (Exception are) {
@@ -231,6 +233,9 @@ public abstract class RMCommunicator extends AbstractService {
Thread.sleep(rmPollInterval);
try {
heartbeat();
+ } catch (YarnException e) {
+ LOG.error("Error communicating with RM: " + e.getMessage() , e);
+ return;
} catch (Exception e) {
LOG.error("ERROR IN CONTACTING RM. ", e);
// TODO: for other exceptions
@@ -242,6 +247,7 @@ public abstract class RMCommunicator extends AbstractService {
}
}
});
+ allocatorThread.setName("RMCommunicator Allocator");
allocatorThread.start();
}
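
Note: the allocator loop above now distinguishes a fatal YarnException (log and stop
the thread) from transient failures (log and keep polling), and the thread gets a
descriptive name. A standalone sketch of that loop; FatalCommunicationException is
an invented stand-in for YarnException:

  public class HeartbeatLoop {

    /** Invented marker for unrecoverable failures, standing in for YarnException. */
    static class FatalCommunicationException extends RuntimeException {
      FatalCommunicationException(String msg) { super(msg); }
    }

    public static Thread start(final Runnable heartbeat, final long pollIntervalMs) {
      Thread allocatorThread = new Thread(new Runnable() {
        @Override
        public void run() {
          while (!Thread.currentThread().isInterrupted()) {
            try {
              Thread.sleep(pollIntervalMs);
              heartbeat.run();
            } catch (FatalCommunicationException e) {
              System.err.println("Error communicating with RM: " + e.getMessage());
              return; // fatal: stop the allocator thread
            } catch (InterruptedException e) {
              return; // service is stopping
            } catch (Exception e) {
              System.err.println("ERROR IN CONTACTING RM. " + e);
              // transient: keep polling
            }
          }
        }
      });
      allocatorThread.setName("RMCommunicator Allocator"); // named, as in the change
      allocatorThread.start();
      return allocatorThread;
    }
  }
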
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
index 7b75cd1fbd7..e8588e5cd0e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerAllocator.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.mapreduce.v2.app.rm;
-import java.net.InetAddress;
-import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
@@ -37,7 +35,12 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.JobCounter;
+import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.mapreduce.v2.app.AppContext;
@@ -50,6 +53,7 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssigned
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.records.AMResponse;
import org.apache.hadoop.yarn.api.records.Container;
import org.apache.hadoop.yarn.api.records.ContainerId;
@@ -125,7 +129,9 @@ public class RMContainerAllocator extends RMContainerRequestor
private float maxReduceRampupLimit = 0;
private float maxReducePreemptionLimit = 0;
private float reduceSlowStart = 0;
-
+ private long retryInterval;
+ private long retrystartTime;
+
public RMContainerAllocator(ClientService clientService, AppContext context) {
super(clientService, context);
}
@@ -143,6 +149,11 @@ public class RMContainerAllocator extends RMContainerRequestor
MRJobConfig.MR_AM_JOB_REDUCE_PREEMPTION_LIMIT,
MRJobConfig.DEFAULT_MR_AM_JOB_REDUCE_PREEMPTION_LIMIT);
RackResolver.init(conf);
+ retryInterval = getConfig().getLong(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
+ MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
+ // Init startTime to current time. If all goes well, it will be reset after
+ // first attempt to contact RM.
+ retrystartTime = System.currentTimeMillis();
}
@Override
@@ -169,6 +180,7 @@ public class RMContainerAllocator extends RMContainerRequestor
LOG.info("Final Stats: " + getStat());
}
+ @SuppressWarnings("unchecked")
@Override
public synchronized void handle(ContainerAllocatorEvent event) {
LOG.info("Processing the event " + event.toString());
@@ -179,7 +191,13 @@ public class RMContainerAllocator extends RMContainerRequestor
if (mapResourceReqt == 0) {
mapResourceReqt = reqEvent.getCapability().getMemory();
int minSlotMemSize = getMinContainerCapability().getMemory();
- mapResourceReqt = (int) Math.ceil((float) mapResourceReqt/minSlotMemSize) * minSlotMemSize;
+ mapResourceReqt = (int) Math.ceil((float) mapResourceReqt/minSlotMemSize)
+ * minSlotMemSize;
+ JobID id = TypeConverter.fromYarn(applicationId);
+ JobId jobId = TypeConverter.toYarn(id);
+ eventHandler.handle(new JobHistoryEvent(jobId,
+ new NormalizedResourceEvent(org.apache.hadoop.mapreduce.TaskType.MAP,
+ mapResourceReqt)));
LOG.info("mapResourceReqt:"+mapResourceReqt);
if (mapResourceReqt > getMaxContainerCapability().getMemory()) {
String diagMsg = "MAP capability required is more than the supported " +
@@ -199,12 +217,20 @@ public class RMContainerAllocator extends RMContainerRequestor
reduceResourceReqt = reqEvent.getCapability().getMemory();
int minSlotMemSize = getMinContainerCapability().getMemory();
//round off on slotsize
- reduceResourceReqt = (int) Math.ceil((float) reduceResourceReqt/minSlotMemSize) * minSlotMemSize;
+ reduceResourceReqt = (int) Math.ceil((float)
+ reduceResourceReqt/minSlotMemSize) * minSlotMemSize;
+ JobID id = TypeConverter.fromYarn(applicationId);
+ JobId jobId = TypeConverter.toYarn(id);
+ eventHandler.handle(new JobHistoryEvent(jobId,
+ new NormalizedResourceEvent(
+ org.apache.hadoop.mapreduce.TaskType.REDUCE,
+ reduceResourceReqt)));
LOG.info("reduceResourceReqt:"+reduceResourceReqt);
if (reduceResourceReqt > getMaxContainerCapability().getMemory()) {
- String diagMsg = "REDUCE capability required is more than the supported " +
- "max container capability in the cluster. Killing the Job. reduceResourceReqt: " +
- reduceResourceReqt + " maxContainerCapability:" + getMaxContainerCapability().getMemory();
+ String diagMsg = "REDUCE capability required is more than the " +
+ "supported max container capability in the cluster. Killing the " +
+ "Job. reduceResourceReqt: " + reduceResourceReqt +
+ " maxContainerCapability:" + getMaxContainerCapability().getMemory();
LOG.info(diagMsg);
eventHandler.handle(new JobDiagnosticsUpdateEvent(
getJob().getID(), diagMsg));
@@ -217,7 +243,8 @@ public class RMContainerAllocator extends RMContainerRequestor
//add to the front of queue for fail fast
pendingReduces.addFirst(new ContainerRequest(reqEvent, PRIORITY_REDUCE));
} else {
- pendingReduces.add(new ContainerRequest(reqEvent, PRIORITY_REDUCE));//reduces are added to pending and are slowly ramped up
+ pendingReduces.add(new ContainerRequest(reqEvent, PRIORITY_REDUCE));
+ //reduces are added to pending and are slowly ramped up
}
}
@@ -410,10 +437,41 @@ public class RMContainerAllocator extends RMContainerRequestor
" rackLocalAssigned:" + rackLocalAssigned +
" availableResources(headroom):" + getAvailableResources();
}
-
+
+ @SuppressWarnings("unchecked")
  private List<Container> getResources() throws Exception {
int headRoom = getAvailableResources() != null ? getAvailableResources().getMemory() : 0;//first time it would be null
- AMResponse response = makeRemoteRequest();
+ AMResponse response;
+ /*
+ * If contact with RM is lost, the AM will wait MR_AM_TO_RM_WAIT_INTERVAL_MS
+ * milliseconds before aborting. During this interval, AM will still try
+ * to contact the RM.
+ */
+ try {
+ response = makeRemoteRequest();
+ // Reset retry count if no exception occurred.
+ retrystartTime = System.currentTimeMillis();
+ } catch (Exception e) {
+ // This can happen when the connection to the RM has gone down. Keep
+ // re-trying until the retryInterval has expired.
+ if (System.currentTimeMillis() - retrystartTime >= retryInterval) {
+ eventHandler.handle(new JobEvent(this.getJob().getID(),
+ JobEventType.INTERNAL_ERROR));
+ throw new YarnException("Could not contact RM after " +
+ retryInterval + " milliseconds.");
+ }
+ // Throw this up to the caller, which may decide to ignore it and
+ // continue to attempt to contact the RM.
+ throw e;
+ }
+ if (response.getReboot()) {
+ // This can happen if the RM has been restarted. If it is in that state,
+ // this application must clean itself up.
+ eventHandler.handle(new JobEvent(this.getJob().getID(),
+ JobEventType.INTERNAL_ERROR));
+ throw new YarnException("Resource Manager doesn't recognize AttemptId: " +
+ this.getContext().getApplicationID());
+          this.getContext().getApplicationID());
+    }
int newHeadRoom = getAvailableResources() != null ? getAvailableResources().getMemory() : 0;
    List<Container> newContainers = response.getAllocatedContainers();
    List<ContainerStatus> finishedContainers = response.getCompletedContainersStatuses();
@@ -509,18 +567,6 @@ public class RMContainerAllocator extends RMContainerRequestor
request = new ContainerRequest(event, PRIORITY_FAST_FAIL_MAP);
} else {
for (String host : event.getHosts()) {
- //host comes from data splitLocations which are hostnames. Containers
- // use IP addresses.
- //TODO Temporary fix for locality. Use resolvers from h-common.
- // Cache to make this more efficient ?
- InetAddress addr = null;
- try {
- addr = InetAddress.getByName(host);
- } catch (UnknownHostException e) {
- LOG.warn("Unable to resolve host to IP for host [: " + host + "]");
- }
- if (addr != null) //Fallback to host if resolve fails.
- host = addr.getHostAddress();
          LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
          if (list == null) {
            list = new LinkedList<TaskAttemptId>();
@@ -550,6 +596,7 @@ public class RMContainerAllocator extends RMContainerRequestor
addContainerReq(req);
}
+ @SuppressWarnings("unchecked")
private void assign(List allocatedContainers) {
    Iterator<Container> it = allocatedContainers.iterator();
LOG.info("Got allocated containers " + allocatedContainers.size());
@@ -557,26 +604,101 @@ public class RMContainerAllocator extends RMContainerRequestor
while (it.hasNext()) {
Container allocated = it.next();
LOG.info("Assigning container " + allocated);
- ContainerRequest assigned = assign(allocated);
-
- if (assigned != null) {
- // Update resource requests
- decContainerReq(assigned);
+
+ // check if allocated container meets memory requirements
+ // and whether we have any scheduled tasks that need
+ // a container to be assigned
+ boolean isAssignable = true;
+ Priority priority = allocated.getPriority();
+ if (PRIORITY_FAST_FAIL_MAP.equals(priority)
+ || PRIORITY_MAP.equals(priority)) {
+ if (allocated.getResource().getMemory() < mapResourceReqt
+ || maps.isEmpty()) {
+ LOG.info("Cannot assign container " + allocated
+ + " for a map as either "
+ + " container memory less than required " + mapResourceReqt
+ + " or no pending map tasks - maps.isEmpty="
+ + maps.isEmpty());
+ isAssignable = false;
+ }
+ }
+ else if (PRIORITY_REDUCE.equals(priority)) {
+ if (allocated.getResource().getMemory() < reduceResourceReqt
+ || reduces.isEmpty()) {
+ LOG.info("Cannot assign container " + allocated
+ + " for a reduce as either "
+ + " container memory less than required " + reduceResourceReqt
+ + " or no pending reduce tasks - reduces.isEmpty="
+ + reduces.isEmpty());
+ isAssignable = false;
+ }
+ }
+
+ boolean blackListed = false;
+ ContainerRequest assigned = null;
+
+ if (isAssignable) {
+ // do not assign if allocated container is on a
+ // blacklisted host
+ blackListed = isNodeBlacklisted(allocated.getNodeId().getHost());
+ if (blackListed) {
+ // we need to request for a new container
+ // and release the current one
+ LOG.info("Got allocated container on a blacklisted "
+ + " host. Releasing container " + allocated);
- // send the container-assigned event to task attempt
- eventHandler.handle(new TaskAttemptContainerAssignedEvent(
- assigned.attemptID, allocated));
+ // find the request matching this allocated container
+ // and replace it with a new one
+ ContainerRequest toBeReplacedReq =
+ getContainerReqToReplace(allocated);
+ if (toBeReplacedReq != null) {
+ LOG.info("Placing a new container request for task attempt "
+ + toBeReplacedReq.attemptID);
+ ContainerRequest newReq =
+ getFilteredContainerRequest(toBeReplacedReq);
+ decContainerReq(toBeReplacedReq);
+ if (toBeReplacedReq.attemptID.getTaskId().getTaskType() ==
+ TaskType.MAP) {
+ maps.put(newReq.attemptID, newReq);
+ }
+ else {
+ reduces.put(newReq.attemptID, newReq);
+ }
+ addContainerReq(newReq);
+ }
+ else {
+ LOG.info("Could not map allocated container to a valid request."
+ + " Releasing allocated container " + allocated);
+ }
+ }
+ else {
+ assigned = assign(allocated);
+ if (assigned != null) {
+ // Update resource requests
+ decContainerReq(assigned);
- assignedRequests.add(allocated.getId(), assigned.attemptID);
-
- LOG.info("Assigned container (" + allocated + ") " +
- " to task " + assigned.attemptID +
- " on node " + allocated.getNodeId().toString());
- } else {
- //not assigned to any request, release the container
- LOG.info("Releasing unassigned and invalid container " + allocated
- + ". RM has gone crazy, someone go look!"
- + " Hey RM, if you are so rich, go donate to non-profits!");
+ // send the container-assigned event to task attempt
+ eventHandler.handle(new TaskAttemptContainerAssignedEvent(
+ assigned.attemptID, allocated, applicationACLs));
+
+ assignedRequests.add(allocated.getId(), assigned.attemptID);
+
+ LOG.info("Assigned container (" + allocated + ") " +
+ " to task " + assigned.attemptID +
+ " on node " + allocated.getNodeId().toString());
+ }
+ else {
+ //not assigned to any request, release the container
+ LOG.info("Releasing unassigned and invalid container "
+ + allocated + ". RM has gone crazy, someone go look!"
+ + " Hey RM, if you are so rich, go donate to non-profits!");
+ }
+ }
+ }
+
+ // release container if it was blacklisted
+ // or if we could not assign it
+ if (blackListed || assigned == null) {
containersReleased++;
release(allocated.getId());
}
@@ -604,12 +726,38 @@ public class RMContainerAllocator extends RMContainerRequestor
return assigned;
}
+ private ContainerRequest getContainerReqToReplace(Container allocated) {
+ Priority priority = allocated.getPriority();
+ ContainerRequest toBeReplaced = null;
+ if (PRIORITY_FAST_FAIL_MAP.equals(priority)
+ || PRIORITY_MAP.equals(priority)) {
+ // allocated container was for a map
+ String host = allocated.getNodeId().getHost();
+      LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
+ if (list != null && list.size() > 0) {
+ TaskAttemptId tId = list.removeLast();
+ if (maps.containsKey(tId)) {
+ toBeReplaced = maps.remove(tId);
+ }
+ }
+ else {
+ TaskAttemptId tId = maps.keySet().iterator().next();
+ toBeReplaced = maps.remove(tId);
+ }
+ }
+ else if (PRIORITY_REDUCE.equals(priority)) {
+ TaskAttemptId tId = reduces.keySet().iterator().next();
+ toBeReplaced = reduces.remove(tId);
+ }
+ return toBeReplaced;
+ }
+
+ @SuppressWarnings("unchecked")
private ContainerRequest assignToFailedMap(Container allocated) {
//try to assign to earlierFailedMaps if present
ContainerRequest assigned = null;
- while (assigned == null && earlierFailedMaps.size() > 0 &&
- allocated.getResource().getMemory() >= mapResourceReqt) {
+ while (assigned == null && earlierFailedMaps.size() > 0) {
TaskAttemptId tId = earlierFailedMaps.removeFirst();
if (maps.containsKey(tId)) {
assigned = maps.remove(tId);
@@ -627,8 +775,7 @@ public class RMContainerAllocator extends RMContainerRequestor
private ContainerRequest assignToReduce(Container allocated) {
ContainerRequest assigned = null;
//try to assign to reduces if present
- if (assigned == null && reduces.size() > 0
- && allocated.getResource().getMemory() >= reduceResourceReqt) {
+ if (assigned == null && reduces.size() > 0) {
TaskAttemptId tId = reduces.keySet().iterator().next();
assigned = reduces.remove(tId);
LOG.info("Assigned to reduce");
@@ -636,13 +783,13 @@ public class RMContainerAllocator extends RMContainerRequestor
return assigned;
}
+ @SuppressWarnings("unchecked")
private ContainerRequest assignToMap(Container allocated) {
//try to assign to maps if present
//first by host, then by rack, followed by *
ContainerRequest assigned = null;
- while (assigned == null && maps.size() > 0
- && allocated.getResource().getMemory() >= mapResourceReqt) {
- String host = getHost(allocated.getNodeId().toString());
+ while (assigned == null && maps.size() > 0) {
+ String host = allocated.getNodeId().getHost();
      LinkedList<TaskAttemptId> list = mapsHostMapping.get(host);
while (list != null && list.size() > 0) {
LOG.info("Host matched to the request list " + host);
@@ -712,7 +859,8 @@ public class RMContainerAllocator extends RMContainerRequestor
}
void preemptReduce(int toPreempt) {
-    List<TaskAttemptId> reduceList = new ArrayList<TaskAttemptId>(reduces.keySet());
+    List<TaskAttemptId> reduceList = new ArrayList<TaskAttemptId>
+      (reduces.keySet());
//sort reduces on progress
Collections.sort(reduceList,
        new Comparator<TaskAttemptId>() {
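
Note: both request paths above round the task's memory ask up to the next multiple
of the scheduler's minimum slot size before talking to the RM. A small illustration
of that arithmetic; the 1200 MB / 1024 MB figures are made up:

  public class SlotRounding {

    /** Round a memory request up to a multiple of the minimum slot size. */
    static int roundToSlot(int requestedMb, int minSlotMb) {
      return (int) Math.ceil((float) requestedMb / minSlotMb) * minSlotMb;
    }

    public static void main(String[] args) {
      System.out.println(roundToSlot(1200, 1024)); // 2048: rounded up one slot
      System.out.println(roundToSlot(1024, 1024)); // 1024: already slot-aligned
    }
  }
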
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
index cda2ed678af..ba3c73219dd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/RMContainerRequestor.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.mapreduce.v2.app.rm;
+import java.net.InetAddress;
+import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.HashSet;
@@ -63,7 +65,7 @@ public abstract class RMContainerRequestor extends RMCommunicator {
//Key->ResourceName (e.g., hostname, rackname, *)
//Value->Map
//Key->Resource Capability
- //Value->ResourceReqeust
+ //Value->ResourceRequest
  private final Map<Priority, Map<String, Map<Resource, ResourceRequest>>>
    remoteRequestsTable =
    new TreeMap<Priority, Map<String, Map<Resource, ResourceRequest>>>();
@@ -87,14 +89,22 @@ public abstract class RMContainerRequestor extends RMCommunicator {
final String[] racks;
//final boolean earlierAttemptFailed;
final Priority priority;
+
public ContainerRequest(ContainerRequestEvent event, Priority priority) {
- this.attemptID = event.getAttemptID();
- this.capability = event.getCapability();
- this.hosts = event.getHosts();
- this.racks = event.getRacks();
- //this.earlierAttemptFailed = event.getEarlierAttemptFailed();
+ this(event.getAttemptID(), event.getCapability(), event.getHosts(),
+ event.getRacks(), priority);
+ }
+
+ public ContainerRequest(TaskAttemptId attemptID,
+ Resource capability, String[] hosts, String[] racks,
+ Priority priority) {
+ this.attemptID = attemptID;
+ this.capability = capability;
+ this.hosts = hosts;
+ this.racks = racks;
this.priority = priority;
}
+
}
@Override
@@ -149,14 +159,35 @@ public abstract class RMContainerRequestor extends RMCommunicator {
//remove all the requests corresponding to this hostname
      for (Map<String, Map<Resource, ResourceRequest>> remoteRequests
          : remoteRequestsTable.values()){
- //remove from host
-      Map<Resource, ResourceRequest> reqMap = remoteRequests.remove(hostName);
+ //remove from host if no pending allocations
+ boolean foundAll = true;
+      Map<Resource, ResourceRequest> reqMap = remoteRequests.get(hostName);
if (reqMap != null) {
for (ResourceRequest req : reqMap.values()) {
- ask.remove(req);
+ if (!ask.remove(req)) {
+ foundAll = false;
+ // if ask already sent to RM, we can try and overwrite it if possible.
+ // send a new ask to RM with numContainers
+ // specified for the blacklisted host to be 0.
+ ResourceRequest zeroedRequest = BuilderUtils.newResourceRequest(req);
+ zeroedRequest.setNumContainers(0);
+ // to be sent to RM on next heartbeat
+ ask.add(zeroedRequest);
+ }
+ }
+ // if all requests were still in ask queue
+ // we can remove this request
+ if (foundAll) {
+ remoteRequests.remove(hostName);
}
}
- //TODO: remove from rack
+ // TODO handling of rack blacklisting
+ // Removing from rack should be dependent on no. of failures within the rack
+ // Blacklisting a rack on the basis of a single node's blacklisting
+ // may be overly aggressive.
+ // Node failures could be co-related with other failures on the same rack
+ // but we probably need a better approach at trying to decide how and when
+ // to blacklist a rack
}
} else {
nodeFailures.put(hostName, failures);
@@ -171,7 +202,9 @@ public abstract class RMContainerRequestor extends RMCommunicator {
// Create resource requests
for (String host : req.hosts) {
// Data-local
- addResourceRequest(req.priority, host, req.capability);
+ if (!isNodeBlacklisted(host)) {
+ addResourceRequest(req.priority, host, req.capability);
+ }
}
// Nothing Rack-local for now
@@ -234,6 +267,14 @@ public abstract class RMContainerRequestor extends RMCommunicator {
    Map<String, Map<Resource, ResourceRequest>> remoteRequests =
      this.remoteRequestsTable.get(priority);
    Map<Resource, ResourceRequest> reqMap = remoteRequests.get(resourceName);
+ if (reqMap == null) {
+ // as we modify the resource requests by filtering out blacklisted hosts
+ // when they are added, this value may be null when being
+ // decremented
+ LOG.debug("Not decrementing resource as " + resourceName
+ + " is not present in request table");
+ return;
+ }
ResourceRequest remoteRequest = reqMap.get(capability);
LOG.info("BEFORE decResourceRequest:" + " applicationId=" + applicationId.getId()
@@ -267,4 +308,23 @@ public abstract class RMContainerRequestor extends RMCommunicator {
release.add(containerId);
}
+ protected boolean isNodeBlacklisted(String hostname) {
+ if (!nodeBlacklistingEnabled) {
+ return false;
+ }
+ return blacklistedNodes.contains(hostname);
+ }
+
+ protected ContainerRequest getFilteredContainerRequest(ContainerRequest orig) {
+    ArrayList<String> newHosts = new ArrayList<String>();
+ for (String host : orig.hosts) {
+ if (!isNodeBlacklisted(host)) {
+ newHosts.add(host);
+ }
+ }
+ String[] hosts = newHosts.toArray(new String[newHosts.size()]);
+ ContainerRequest newReq = new ContainerRequest(orig.attemptID, orig.capability,
+ hosts, orig.racks, orig.priority);
+ return newReq;
+ }
}
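
Note: getFilteredContainerRequest above rebuilds a container request with
blacklisted hosts removed from its preferred-host list, and addContainerReq skips
blacklisted hosts when creating data-local requests. A minimal sketch of just the
host-filtering step (names are illustrative):

  import java.util.ArrayList;
  import java.util.List;
  import java.util.Set;

  public class BlacklistFilter {

    /** Drop blacklisted hosts from a request's preferred-host list. */
    static String[] filterHosts(String[] hosts, Set<String> blacklistedNodes) {
      List<String> kept = new ArrayList<String>();
      for (String host : hosts) {
        if (!blacklistedNodes.contains(host)) {
          kept.add(host);
        }
      }
      return kept.toArray(new String[kept.size()]);
    }
  }
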
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/package-info.java
new file mode 100644
index 00000000000..68583fe7bdf
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/rm/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.rm;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java
new file mode 100644
index 00000000000..3f6ecb4386b
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/MRAMPolicyProvider.java
@@ -0,0 +1,50 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.v2.app.security.authorize;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.security.authorize.PolicyProvider;
+import org.apache.hadoop.security.authorize.Service;
+import org.apache.hadoop.yarn.proto.MRClientProtocol;
+
+/**
+ * {@link PolicyProvider} for YARN MapReduce protocols.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class MRAMPolicyProvider extends PolicyProvider {
+
+ private static final Service[] mapReduceApplicationMasterServices =
+ new Service[] {
+ new Service(
+ MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_TASK_UMBILICAL,
+ TaskUmbilicalProtocol.class),
+ new Service(
+ MRJobConfig.MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT,
+ MRClientProtocol.MRClientProtocolService.BlockingInterface.class)
+ };
+
+ @Override
+ public Service[] getServices() {
+ return mapReduceApplicationMasterServices;
+ }
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/package-info.java
new file mode 100644
index 00000000000..d95af78203b
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/security/authorize/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.security.authorize;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java
index cb6b441743e..9c1be8602ff 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/ExponentiallySmoothedTaskRuntimeEstimator.java
@@ -135,9 +135,9 @@ public class ExponentiallySmoothedTaskRuntimeEstimator extends StartEndTimesBase
lambda
= conf.getLong(MRJobConfig.MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS,
- MRJobConfig.DEFAULT_MR_AM_TASK_ESTIMATOR_SMNOOTH_LAMBDA_MS);
+ MRJobConfig.DEFAULT_MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS);
smoothedValue
- = conf.getBoolean(MRJobConfig.MR_AM_TASK_EXTIMATOR_EXPONENTIAL_RATE_ENABLE, true)
+ = conf.getBoolean(MRJobConfig.MR_AM_TASK_ESTIMATOR_EXPONENTIAL_RATE_ENABLE, true)
? SmoothedValue.RATE : SmoothedValue.TIME_PER_UNIT_PROGRESS;
}
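
Note: the two corrected keys above configure the exponentially smoothed runtime
estimator: a lambda (in milliseconds) controls how quickly old observations decay,
and the boolean selects whether a rate or a time-per-unit-progress is smoothed. As a
rough illustration only, and not the estimator's actual update rule, exponential
smoothing with a time-based weight can be written as:

  public class ExponentialSmoother {
    private final double lambdaMs; // decay time constant, analogous to the lambda key
    private Double smoothed;       // null until the first observation

    ExponentialSmoother(double lambdaMs) {
      this.lambdaMs = lambdaMs;
    }

    /** Fold in a new sample observed deltaMs after the previous one. */
    void add(double sample, double deltaMs) {
      if (smoothed == null) {
        smoothed = sample;
        return;
      }
      double alpha = 1.0 - Math.exp(-deltaMs / lambdaMs); // weight of the new sample
      smoothed = alpha * sample + (1.0 - alpha) * smoothed;
    }

    double value() {
      return smoothed == null ? Double.NaN : smoothed;
    }
  }
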
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/package-info.java
new file mode 100644
index 00000000000..69c7eca5544
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/speculate/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.speculate;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java
index b18b334a4a4..da5a342a2ee 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/TaskCleanerImpl.java
@@ -20,6 +20,7 @@ package org.apache.hadoop.mapreduce.v2.app.taskclean;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
+import java.util.concurrent.ThreadFactory;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
@@ -31,6 +32,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.service.AbstractService;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;
+
public class TaskCleanerImpl extends AbstractService implements TaskCleaner {
private static final Log LOG = LogFactory.getLog(TaskCleanerImpl.class);
@@ -47,8 +50,11 @@ public class TaskCleanerImpl extends AbstractService implements TaskCleaner {
}
public void start() {
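+ // Name the pool threads (e.g. "TaskCleaner #1") so they are identifiable in thread dumps.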
+ ThreadFactory tf = new ThreadFactoryBuilder()
+ .setNameFormat("TaskCleaner #%d")
+ .build();
launcherPool = new ThreadPoolExecutor(1, 5, 1,
- TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>());
+ TimeUnit.HOURS, new LinkedBlockingQueue<Runnable>(), tf);
eventHandlingThread = new Thread(new Runnable() {
@Override
public void run() {
@@ -65,6 +71,7 @@ public class TaskCleanerImpl extends AbstractService implements TaskCleaner {
launcherPool.execute(new EventProcessor(event)); }
}
});
+ eventHandlingThread.setName("TaskCleaner Event Handler");
eventHandlingThread.start();
super.start();
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/package-info.java
new file mode 100644
index 00000000000..b58a545444d
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/taskclean/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.taskclean;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
index eff721d17f7..aa0d89c0342 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/AppController.java
@@ -28,9 +28,12 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.StringHelper;
import org.apache.hadoop.yarn.util.Times;
@@ -267,6 +270,29 @@ public class AppController extends Controller implements AMParams {
setStatus(HttpServletResponse.SC_NOT_FOUND);
setTitle(join("Not found: ", s));
}
+
+ /**
+ * Render an ACCESS_DENIED error.
+ * @param s the error message to include.
+ */
+ void accessDenied(String s) {
+ setStatus(HttpServletResponse.SC_FORBIDDEN);
+ setTitle(join("Access denied: ", s));
+ throw new RuntimeException("Access denied: " + s);
+ }
+
+ /**
+ * check for job access.
+ * @param job the job that is being accessed
+ */
+ void checkAccess(Job job) {
+ UserGroupInformation callerUgi = UserGroupInformation.createRemoteUser(
+ request().getRemoteUser());
+ if (!job.checkAccess(callerUgi, JobACL.VIEW_JOB)) {
+ accessDenied("User " + request().getRemoteUser() + " does not have " +
+ " permissions.");
+ }
+ }
/**
* Ensure that a JOB_ID was passed into the page.
@@ -281,6 +307,9 @@ public class AppController extends Controller implements AMParams {
if (app.getJob() == null) {
notFound($(JOB_ID));
}
+ /* check for acl access */
+ Job job = app.context.getJob(jobID);
+ checkAccess(job);
} catch (Exception e) {
badRequest(e.getMessage() == null ?
e.getClass().getName() : e.getMessage());
@@ -296,7 +325,8 @@ public class AppController extends Controller implements AMParams {
throw new RuntimeException("missing task ID");
}
TaskId taskID = MRApps.toTaskID($(TASK_ID));
- app.setJob(app.context.getJob(taskID.getJobId()));
+ Job job = app.context.getJob(taskID.getJobId());
+ app.setJob(job);
if (app.getJob() == null) {
notFound(MRApps.toString(taskID.getJobId()));
} else {
@@ -305,6 +335,7 @@ public class AppController extends Controller implements AMParams {
notFound($(TASK_ID));
}
}
+ checkAccess(job);
} catch (Exception e) {
badRequest(e.getMessage());
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
index a23821ec4b4..cf6ab99a936 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/CountersBlock.java
@@ -119,11 +119,16 @@ public class CountersBlock extends HtmlBlock {
for (Counter counter : g.getAllCounters().values()) {
// Ditto
TR>>>>>>> groupRow = group.
- tr().
- td().$title(counter.getName()).
+ tr();
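+ // With no map/reduce counter groups to drill into, show the counter name as
+ // plain text instead of a per-counter link.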
+ if (mg == null && rg == null) {
+ groupRow.td().$title(counter.getName())._(counter.getDisplayName()).
+ _();
+ } else {
+ groupRow.td().$title(counter.getName()).
a(url(urlBase,urlId,g.getName(),
counter.getName()), counter.getDisplayName()).
_();
+ }
if (map != null) {
Counter mc = mg == null ? null : mg.getCounter(counter.getName());
Counter rc = rg == null ? null : rg.getCounter(counter.getName());
@@ -168,12 +173,11 @@ public class CountersBlock extends HtmlBlock {
}
// Get all types of counters
Map<TaskId, Task> tasks = job.getTasks();
- total = JobImpl.newCounters();
+ total = job.getCounters();
map = JobImpl.newCounters();
reduce = JobImpl.newCounters();
for (Task t : tasks.values()) {
Counters counters = t.getCounters();
- JobImpl.incrAllCounters(total, counters);
switch (t.getType()) {
case MAP: JobImpl.incrAllCounters(map, counters); break;
case REDUCE: JobImpl.incrAllCounters(reduce, counters); break;
@@ -184,4 +188,4 @@ public class CountersBlock extends HtmlBlock {
private String fixGroupDisplayName(CharSequence name) {
return name.toString().replace(".", ".\u200B").replace("$", "\u200B$");
}
-}
+}
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
index de56f5a2228..56a0a2f4c0f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/NavBlock.java
@@ -18,10 +18,13 @@
package org.apache.hadoop.mapreduce.v2.app.webapp;
+import java.util.List;
+
import com.google.inject.Inject;
import static org.apache.hadoop.mapreduce.v2.app.webapp.AMWebApp.*;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.*;
@@ -47,6 +50,10 @@ public class NavBlock extends HtmlBlock {
li().a(url("app"), "Jobs")._()._();
if (app.getJob() != null) {
String jobid = MRApps.toString(app.getJob().getID());
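+ // The last AMInfo entry corresponds to the current AM attempt; its NodeManager
+ // address is used below to build the "AM Logs" link.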
+ List<AMInfo> amInfos = app.getJob().getAMInfos();
+ AMInfo thisAmInfo = amInfos.get(amInfos.size()-1);
+ String nodeHttpAddress = thisAmInfo.getNodeManagerHost() + ":"
+ + thisAmInfo.getNodeManagerHttpPort();
nav.
h3("Job").
ul().
@@ -54,7 +61,11 @@ public class NavBlock extends HtmlBlock {
li().a(url("jobcounters", jobid), "Counters")._().
li().a(url("conf", jobid), "Configuration")._().
li().a(url("tasks", jobid, "m"), "Map tasks")._().
- li().a(url("tasks", jobid, "r"), "Reduce tasks")._()._();
+ li().a(url("tasks", jobid, "r"), "Reduce tasks")._().
+ li().a(".logslink", url("http://", nodeHttpAddress, "node",
+ "containerlogs", thisAmInfo.getContainerId().toString(),
+ app.getJob().getUserName()),
+ "AM Logs")._()._();
if (app.getTask() != null) {
String taskid = MRApps.toString(app.getTask().getID());
nav.
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
index 9918f66c80e..5acc77053fc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/TaskPage.java
@@ -86,7 +86,7 @@ public class TaskPage extends AppView {
String containerIdStr = ConverterUtils.toString(containerId);
nodeTd._(" ").
a(".logslink", url("http://", nodeHttpAddr, "node", "containerlogs",
- containerIdStr), "logs");
+ containerIdStr, app.getJob().getUserName()), "logs");
}
nodeTd._().
td(".ts", Times.format(startTime)).
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/package-info.java
new file mode 100644
index 00000000000..ba1cdbb2fa0
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/webapp/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.app.webapp;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java
new file mode 100644
index 00000000000..a5756da9934
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapred/TestTaskAttemptListenerImpl.java
@@ -0,0 +1,100 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+package org.apache.hadoop.mapred;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.*;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.TaskHeartbeatHandler;
+import org.junit.Test;
+
+public class TestTaskAttemptListenerImpl {
+ public static class MockTaskAttemptListenerImpl extends TaskAttemptListenerImpl {
+
+ public MockTaskAttemptListenerImpl(AppContext context,
+ JobTokenSecretManager jobTokenSecretManager,
+ TaskHeartbeatHandler hbHandler) {
+ super(context, jobTokenSecretManager);
+ this.taskHeartbeatHandler = hbHandler;
+ }
+
+ @Override
+ protected void registerHeartbeatHandler() {
+ //Empty
+ }
+
+ @Override
+ protected void startRpcServer() {
+ //Empty
+ }
+
+ @Override
+ protected void stopRpcServer() {
+ //Empty
+ }
+ }
+
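+ // getTask() lifecycle: an unknown JVM is told to die, a registered-but-pending
+ // JVM gets no task yet, a launched task is handed out exactly once, and any
+ // further request is again told to die.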
+ @Test
+ public void testGetTask() throws IOException {
+ AppContext appCtx = mock(AppContext.class);
+ JobTokenSecretManager secret = mock(JobTokenSecretManager.class);
+ TaskHeartbeatHandler hbHandler = mock(TaskHeartbeatHandler.class);
+ MockTaskAttemptListenerImpl listener =
+ new MockTaskAttemptListenerImpl(appCtx, secret, hbHandler);
+ Configuration conf = new Configuration();
+ listener.init(conf);
+ listener.start();
+ JVMId id = new JVMId("foo",1, true, 1);
+ WrappedJvmID wid = new WrappedJvmID(id.getJobId(), id.isMap, id.getId());
+
+ //The JVM ID has not been registered yet so we should kill it.
+ JvmContext context = new JvmContext();
+ context.jvmId = id;
+ JvmTask result = listener.getTask(context);
+ assertNotNull(result);
+ assertTrue(result.shouldDie);
+
+ //Now register the JVM, and see
+ listener.registerPendingTask(wid);
+ result = listener.getTask(context);
+ assertNull(result);
+
+ TaskAttemptId attemptID = mock(TaskAttemptId.class);
+ Task task = mock(Task.class);
+ //Now put a task with the ID
+ listener.registerLaunchedTask(attemptID, task, wid);
+ verify(hbHandler).register(attemptID);
+ result = listener.getTask(context);
+ assertNotNull(result);
+ assertFalse(result.shouldDie);
+
+ //Verify that if we call it again a second time we are told to die.
+ result = listener.getTask(context);
+ assertNotNull(result);
+ assertTrue(result.shouldDie);
+
+ listener.unregister(attemptID, wid);
+ listener.stop();
+ }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
index d6e2d968173..888bec3e508 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRApp.java
@@ -31,8 +31,12 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapred.WrappedJvmID;
+import org.apache.hadoop.mapreduce.JobID;
import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;
import org.apache.hadoop.mapreduce.security.token.JobTokenSecretManager;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
@@ -77,6 +81,7 @@ import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
import org.apache.hadoop.yarn.service.Service;
import org.apache.hadoop.yarn.state.StateMachine;
import org.apache.hadoop.yarn.state.StateMachineFactory;
+import org.apache.hadoop.yarn.util.BuilderUtils;
/**
@@ -91,6 +96,10 @@ public class MRApp extends MRAppMaster {
private File testWorkDir;
private Path testAbsPath;
+
+ public static String NM_HOST = "localhost";
+ public static int NM_PORT = 1234;
+ public static int NM_HTTP_PORT = 9999;
private static final RecordFactory recordFactory =
RecordFactoryProvider.getRecordFactory(null);
@@ -118,10 +127,21 @@ public class MRApp extends MRAppMaster {
applicationAttemptId.setAttemptId(startCount);
return applicationAttemptId;
}
+
+ private static ContainerId getContainerId(ApplicationId applicationId,
+ int startCount) {
+ ApplicationAttemptId appAttemptId =
+ getApplicationAttemptId(applicationId, startCount);
+ ContainerId containerId =
+ BuilderUtils.newContainerId(appAttemptId, startCount);
+ return containerId;
+ }
public MRApp(int maps, int reduces, boolean autoComplete, String testName,
boolean cleanOnStart, int startCount) {
- super(getApplicationAttemptId(applicationId, startCount));
+ super(getApplicationAttemptId(applicationId, startCount), getContainerId(
+ applicationId, startCount), NM_HOST, NM_PORT, NM_HTTP_PORT, System
+ .currentTimeMillis());
this.testWorkDir = new File("target", testName);
testAbsPath = new Path(testWorkDir.getAbsolutePath());
LOG.info("PathUsed: " + testAbsPath);
@@ -253,9 +273,11 @@ public class MRApp extends MRAppMaster {
} catch (IOException e) {
throw new YarnException(e);
}
- Job newJob = new TestJob(conf, getAppID(), getDispatcher().getEventHandler(),
- getTaskAttemptListener(), getContext().getClock(),
- currentUser.getUserName());
+ Job newJob = new TestJob(getJobId(), getAttemptID(), conf,
+ getDispatcher().getEventHandler(),
+ getTaskAttemptListener(), getContext().getClock(),
+ getCommitter(), isNewApiCommitter(),
+ currentUser.getUserName());
((AppContext) getContext()).getAllJobs().put(newJob.getID(), newJob);
getDispatcher().register(JobFinishEvent.Type.class,
@@ -277,11 +299,14 @@ public class MRApp extends MRAppMaster {
return null;
}
@Override
- public void register(TaskAttemptId attemptID,
+ public void registerLaunchedTask(TaskAttemptId attemptID,
org.apache.hadoop.mapred.Task task, WrappedJvmID jvmID) {}
@Override
public void unregister(TaskAttemptId attemptID, WrappedJvmID jvmID) {
}
+ @Override
+ public void registerPendingTask(WrappedJvmID jvmID) {
+ }
};
}
@@ -301,15 +326,18 @@ public class MRApp extends MRAppMaster {
}
class MockContainerLauncher implements ContainerLauncher {
+
+ //We are running locally so set the shuffle port to -1
+ int shufflePort = -1;
+
+ @SuppressWarnings("unchecked")
@Override
public void handle(ContainerLauncherEvent event) {
switch (event.getType()) {
case CONTAINER_REMOTE_LAUNCH:
- //We are running locally so set the shuffle port to -1
getContext().getEventHandler().handle(
new TaskAttemptContainerLaunchedEvent(event.getTaskAttemptID(),
- -1)
- );
+ shufflePort));
attemptLaunched(event.getTaskAttemptID());
break;
@@ -341,16 +369,22 @@ public class MRApp extends MRAppMaster {
ContainerId cId = recordFactory.newRecordInstance(ContainerId.class);
cId.setApplicationAttemptId(getContext().getApplicationAttemptId());
cId.setId(containerCount++);
- Container container = recordFactory.newRecordInstance(Container.class);
- container.setId(cId);
- container.setNodeId(recordFactory.newRecordInstance(NodeId.class));
- container.getNodeId().setHost("dummy");
- container.getNodeId().setPort(1234);
- container.setContainerToken(null);
- container.setNodeHttpAddress("localhost:9999");
+ NodeId nodeId = BuilderUtils.newNodeId(NM_HOST, NM_PORT);
+ Container container = BuilderUtils.newContainer(cId, nodeId,
+ NM_HOST + ":" + NM_HTTP_PORT, null, null, null);
+ JobID id = TypeConverter.fromYarn(applicationId);
+ JobId jobId = TypeConverter.toYarn(id);
+ getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
+ new NormalizedResourceEvent(
+ org.apache.hadoop.mapreduce.TaskType.REDUCE,
+ 100)));
+ getContext().getEventHandler().handle(new JobHistoryEvent(jobId,
+ new NormalizedResourceEvent(
+ org.apache.hadoop.mapreduce.TaskType.MAP,
+ 100)));
getContext().getEventHandler().handle(
new TaskAttemptContainerAssignedEvent(event.getAttemptID(),
- container));
+ container, null));
}
};
}
@@ -402,13 +436,15 @@ public class MRApp extends MRAppMaster {
return localStateMachine;
}
- public TestJob(Configuration conf, ApplicationId applicationId,
- EventHandler eventHandler, TaskAttemptListener taskAttemptListener,
- Clock clock, String user) {
- super(getApplicationAttemptId(applicationId, getStartCount()),
+ public TestJob(JobId jobId, ApplicationAttemptId applicationAttemptId,
+ Configuration conf, EventHandler eventHandler,
+ TaskAttemptListener taskAttemptListener, Clock clock,
+ OutputCommitter committer, boolean newApiCommitter, String user) {
+ super(jobId, getApplicationAttemptId(applicationId, getStartCount()),
conf, eventHandler, taskAttemptListener,
- new JobTokenSecretManager(), new Credentials(), clock,
- getCompletedTaskFromPreviousRun(), metrics, user);
+ new JobTokenSecretManager(), new Credentials(), clock,
+ getCompletedTaskFromPreviousRun(), metrics, committer,
+ newApiCommitter, user, System.currentTimeMillis(), getAllAMInfos());
// This "this leak" is okay because the retained pointer is in an
// instance variable.
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
index c5a117ce4f8..0d6c7d7576c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MRAppBenchmark.java
@@ -144,7 +144,7 @@ public class MRAppBenchmark {
getContext().getEventHandler()
.handle(
new TaskAttemptContainerAssignedEvent(event
- .getAttemptID(), container));
+ .getAttemptID(), container, null));
concurrentRunningTasks++;
} else {
Thread.sleep(1000);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
index 7f55dd4d571..7a6e1f061b0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/MockJobs.java
@@ -24,6 +24,7 @@ import com.google.common.collect.Maps;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
+import java.util.LinkedList;
import java.util.List;
import java.util.Map;
@@ -33,6 +34,7 @@ import org.apache.hadoop.mapreduce.FileSystemCounter;
import org.apache.hadoop.mapreduce.JobACL;
import org.apache.hadoop.mapreduce.JobCounter;
import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -51,12 +53,14 @@ import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.yarn.MockApps;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.BuilderUtils;
import org.apache.hadoop.yarn.util.Records;
public class MockJobs extends MockApps {
@@ -85,6 +89,10 @@ public class MockJobs extends MockApps {
static final Iterator<String> DIAGS = Iterators.cycle(
"Error: java.lang.OutOfMemoryError: Java heap space",
"Lost task tracker: tasktracker.domain/127.0.0.1:40879");
+
+ public static final String NM_HOST = "localhost";
+ public static final int NM_PORT = 1234;
+ public static final int NM_HTTP_PORT = 9999;
static final int DT = 1000000; // ms
@@ -271,6 +279,11 @@ public class MockJobs extends MockApps {
public long getSortFinishTime() {
return 0;
}
+
+ @Override
+ public String getNodeRackName() {
+ return "/default-rack";
+ }
};
}
@@ -488,6 +501,23 @@ public class MockJobs extends MockApps {
public Map<JobACL, AccessControlList> getJobACLs() {
return Collections.emptyMap();
}
+
+ @Override
+ public List<AMInfo> getAMInfos() {
+ List<AMInfo> amInfoList = new LinkedList<AMInfo>();
+ amInfoList.add(createAMInfo(1));
+ amInfoList.add(createAMInfo(2));
+ return amInfoList;
+ }
};
}
+
+ private static AMInfo createAMInfo(int attempt) {
+ ApplicationAttemptId appAttemptId =
+ BuilderUtils.newApplicationAttemptId(
+ BuilderUtils.newApplicationId(100, 1), attempt);
+ ContainerId containerId = BuilderUtils.newContainerId(appAttemptId, 1);
+ return MRBuilderUtils.newAMInfo(appAttemptId, System.currentTimeMillis(),
+ containerId, NM_HOST, NM_PORT, NM_HTTP_PORT);
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestContainerLauncher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestContainerLauncher.java
new file mode 100644
index 00000000000..b2686e2314b
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestContainerLauncher.java
@@ -0,0 +1,133 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import java.io.IOException;
+import java.util.Map;
+
+import junit.framework.Assert;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncherImpl;
+import org.apache.hadoop.yarn.api.ContainerManager;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ContainerToken;
+import org.junit.Test;
+
+public class TestContainerLauncher {
+
+ static final Log LOG = LogFactory
+ .getLog(TestContainerLauncher.class);
+
+ @Test
+ public void testSlowNM() throws Exception {
+ test(false);
+ }
+
+ @Test
+ public void testSlowNMWithInterruptsSwallowed() throws Exception {
+ test(true);
+ }
+
+ private void test(boolean swallowInterrupts) throws Exception {
+
+ MRApp app = new MRAppWithSlowNM(swallowInterrupts);
+
+ Configuration conf = new Configuration();
+ int maxAttempts = 1;
+ conf.setInt(MRJobConfig.MAP_MAX_ATTEMPTS, maxAttempts);
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+
+ // Set low timeout for NM commands
+ conf.setInt(ContainerLauncher.MR_AM_NM_COMMAND_TIMEOUT, 3000);
+
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+
+ Map<TaskId, Task> tasks = job.getTasks();
+ Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
+
+ Task task = tasks.values().iterator().next();
+ app.waitForState(task, TaskState.SCHEDULED);
+
+ Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator()
+ .next().getAttempts();
+ Assert.assertEquals("Num attempts is not correct", maxAttempts, attempts
+ .size());
+
+ TaskAttempt attempt = attempts.values().iterator().next();
+ app.waitForState(attempt, TaskAttemptState.ASSIGNED);
+
+ app.waitForState(job, JobState.FAILED);
+
+ LOG.info("attempt.getDiagnostics: " + attempt.getDiagnostics());
+ Assert.assertTrue(attempt.getDiagnostics().toString().contains(
+ "Container launch failed for container_0_0000_01_000000 : "));
+ Assert.assertTrue(attempt.getDiagnostics().toString().contains(
+ ": java.lang.InterruptedException"));
+
+ app.stop();
+ }
+
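+ // MRApp variant whose container-manager proxy blocks indefinitely, simulating a
+ // NodeManager that never responds to the launch request.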
+ private static class MRAppWithSlowNM extends MRApp {
+
+ final boolean swallowInterrupts;
+
+ public MRAppWithSlowNM(boolean swallowInterrupts) {
+ super(1, 0, false, "TestContainerLauncher", true);
+ this.swallowInterrupts = swallowInterrupts;
+ }
+
+ @Override
+ protected ContainerLauncher createContainerLauncher(AppContext context) {
+ return new ContainerLauncherImpl(context) {
+ @Override
+ protected ContainerManager getCMProxy(ContainerId containerID,
+ String containerManagerBindAddr, ContainerToken containerToken)
+ throws IOException {
+ try {
+ synchronized (this) {
+ wait(); // Just hang the thread simulating a very slow NM.
+ }
+ } catch (InterruptedException e) {
+ LOG.info(e);
+ if (!swallowInterrupts) {
+ throw new IOException(e);
+ } else {
+ Thread.currentThread().interrupt();
+ }
+ }
+ return null;
+ }
+ };
+ };
+ }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
index 5598cecb5ab..1b35b21559a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestFail.java
@@ -219,7 +219,7 @@ public class TestFail {
}
@Override
- protected ContainerManager getCMProxy(ContainerId containerID,
+ protected ContainerManager getCMProxy(ContainerId contianerID,
String containerManagerBindAddr, ContainerToken containerToken)
throws IOException {
try {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
new file mode 100644
index 00000000000..46cb11e9241
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestJobEndNotifier.java
@@ -0,0 +1,108 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+import org.junit.Assert;
+import org.junit.Test;
+import org.mockito.Mockito;
+
+/**
+ * Tests job end notification
+ *
+ */
+public class TestJobEndNotifier extends JobEndNotifier {
+
+ //Test maximum retries is capped by MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS
+ private void testNumRetries(Configuration conf) {
+ conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "0");
+ conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "10");
+ setConf(conf);
+ Assert.assertTrue("Expected numTries to be 0, but was " + numTries,
+ numTries == 0 );
+
+ conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "1");
+ setConf(conf);
+ Assert.assertTrue("Expected numTries to be 1, but was " + numTries,
+ numTries == 1 );
+
+ conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "20");
+ setConf(conf);
+ Assert.assertTrue("Expected numTries to be 11, but was " + numTries,
+ numTries == 11 ); //11 because number of _retries_ is 10
+ }
+
+ //Test maximum retry interval is capped by
+ //MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL
+ private void testWaitInterval(Configuration conf) {
+ conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL, "5");
+ conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "1");
+ setConf(conf);
+ Assert.assertTrue("Expected waitInterval to be 1, but was " + waitInterval,
+ waitInterval == 1);
+
+ conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "10");
+ setConf(conf);
+ Assert.assertTrue("Expected waitInterval to be 5, but was " + waitInterval,
+ waitInterval == 5);
+
+ //Test negative numbers are set to default
+ conf.set(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, "-10");
+ setConf(conf);
+ Assert.assertTrue("Expected waitInterval to be 5, but was " + waitInterval,
+ waitInterval == 5);
+ }
+
+ /**
+ * Test that setting parameters has the desired effect
+ */
+ @Test
+ public void checkConfiguration() {
+ Configuration conf = new Configuration();
+ testNumRetries(conf);
+ testWaitInterval(conf);
+ }
+
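+ // Count notification attempts by intercepting each single notification try.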
+ protected int notificationCount = 0;
+ @Override
+ protected boolean notifyURLOnce() {
+ boolean success = super.notifyURLOnce();
+ notificationCount++;
+ return success;
+ }
+
+ //Check retries happen as intended
+ @Test
+ public void testNotifyRetries() throws InterruptedException {
+ Configuration conf = new Configuration();
+ conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_URL, "http://nonexistent");
+ conf.set(MRJobConfig.MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS, "3");
+ conf.set(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, "3");
+ JobReport jobReport = Mockito.mock(JobReport.class);
+
+ this.notificationCount = 0;
+ this.setConf(conf);
+ this.notify(jobReport);
+ Assert.assertEquals("Only 3 retries were expected but was : "
+ + this.notificationCount, 3, this.notificationCount);
+ }
+
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
index c21c4528fb8..c636b1c00d8 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRAppMaster.java
@@ -27,6 +27,7 @@ import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.util.MRApps;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.util.ConverterUtils;
import org.junit.Test;
@@ -36,11 +37,15 @@ public class TestMRAppMaster {
public void testMRAppMasterForDifferentUser() throws IOException,
InterruptedException {
String applicationAttemptIdStr = "appattempt_1317529182569_0004_000001";
+ String containerIdStr = "container_1317529182569_0004_000001_1";
String stagingDir = "/tmp/staging";
String userName = "TestAppMasterUser";
ApplicationAttemptId applicationAttemptId = ConverterUtils
.toApplicationAttemptId(applicationAttemptIdStr);
- MRAppMasterTest appMaster = new MRAppMasterTest(applicationAttemptId);
+ ContainerId containerId = ConverterUtils.toContainerId(containerIdStr);
+ MRAppMasterTest appMaster =
+ new MRAppMasterTest(applicationAttemptId, containerId, "host", -1, -1,
+ System.currentTimeMillis());
YarnConfiguration conf = new YarnConfiguration();
conf.set(MRJobConfig.MR_AM_STAGING_DIR, stagingDir);
MRAppMaster.initAndStartAppMaster(appMaster, conf, userName);
@@ -54,8 +59,10 @@ class MRAppMasterTest extends MRAppMaster {
Path stagingDirPath;
private Configuration conf;
- public MRAppMasterTest(ApplicationAttemptId applicationAttemptId) {
- super(applicationAttemptId);
+ public MRAppMasterTest(ApplicationAttemptId applicationAttemptId,
+ ContainerId containerId, String host, int port, int httpPort,
+ long submitTime) {
+ super(applicationAttemptId, containerId, host, port, httpPort, submitTime);
}
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java
index c32f128fd2a..9c59269ec6f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestMRClientService.java
@@ -32,13 +32,15 @@ import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompleti
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.Phase;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
-import org.apache.hadoop.mapreduce.v2.app.AppContext;
import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.client.MRClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
@@ -49,10 +51,8 @@ import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptStatusUpdateEvent.TaskAttemptStatus;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
import org.apache.hadoop.yarn.factories.RecordFactory;
import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
-import org.apache.hadoop.yarn.ipc.RPCUtil;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.junit.Test;
@@ -83,7 +83,6 @@ public class TestMRClientService {
TaskAttemptStatus taskAttemptStatus = new TaskAttemptStatus();
taskAttemptStatus.id = attempt.getID();
taskAttemptStatus.progress = 0.5f;
- taskAttemptStatus.diagnosticInfo = diagnostic2;
taskAttemptStatus.stateString = "RUNNING";
taskAttemptStatus.taskState = TaskAttemptState.RUNNING;
taskAttemptStatus.phase = Phase.MAP;
@@ -107,8 +106,9 @@ public class TestMRClientService {
GetJobReportRequest gjrRequest =
recordFactory.newRecordInstance(GetJobReportRequest.class);
gjrRequest.setJobId(job.getID());
- Assert.assertNotNull("JobReport is null",
- proxy.getJobReport(gjrRequest).getJobReport());
+ JobReport jr = proxy.getJobReport(gjrRequest).getJobReport();
+ verifyJobReport(jr);
+
GetTaskAttemptCompletionEventsRequest gtaceRequest =
recordFactory.newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
@@ -127,8 +127,10 @@ public class TestMRClientService {
GetTaskAttemptReportRequest gtarRequest =
recordFactory.newRecordInstance(GetTaskAttemptReportRequest.class);
gtarRequest.setTaskAttemptId(attempt.getID());
- Assert.assertNotNull("TaskAttemptReport is null",
- proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport());
+ TaskAttemptReport tar =
+ proxy.getTaskAttemptReport(gtarRequest).getTaskAttemptReport();
+ verifyTaskAttemptReport(tar);
+
GetTaskReportRequest gtrRequest =
recordFactory.newRecordInstance(GetTaskReportRequest.class);
@@ -151,14 +153,12 @@ public class TestMRClientService {
proxy.getTaskReports(gtreportsRequest).getTaskReportList());
List<String> diag = proxy.getDiagnostics(gdRequest).getDiagnosticsList();
- Assert.assertEquals("Num diagnostics not correct", 2 , diag.size());
+ Assert.assertEquals("Num diagnostics not correct", 1 , diag.size());
Assert.assertEquals("Diag 1 not correct",
diagnostic1, diag.get(0).toString());
- Assert.assertEquals("Diag 2 not correct",
- diagnostic2, diag.get(1).toString());
TaskReport taskReport = proxy.getTaskReport(gtrRequest).getTaskReport();
- Assert.assertEquals("Num diagnostics not correct", 2,
+ Assert.assertEquals("Num diagnostics not correct", 1,
taskReport.getDiagnosticsCount());
//send the done signal to the task
@@ -170,6 +170,31 @@ public class TestMRClientService {
app.waitForState(job, JobState.SUCCEEDED);
}
+ private void verifyJobReport(JobReport jr) {
+ Assert.assertNotNull("JobReport is null", jr);
+ List<AMInfo> amInfos = jr.getAMInfos();
+ Assert.assertEquals(1, amInfos.size());
+ Assert.assertEquals(JobState.RUNNING, jr.getJobState());
+ AMInfo amInfo = amInfos.get(0);
+ Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
+ Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
+ Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
+ Assert.assertEquals(1, amInfo.getAppAttemptId().getAttemptId());
+ Assert.assertEquals(1, amInfo.getContainerId().getApplicationAttemptId()
+ .getAttemptId());
+ Assert.assertTrue(amInfo.getStartTime() > 0);
+ }
+
+ private void verifyTaskAttemptReport(TaskAttemptReport tar) {
+ Assert.assertEquals(TaskAttemptState.RUNNING, tar.getTaskAttemptState());
+ Assert.assertNotNull("TaskAttemptReport is null", tar);
+ Assert.assertEquals(MRApp.NM_HOST, tar.getNodeManagerHost());
+ Assert.assertEquals(MRApp.NM_PORT, tar.getNodeManagerPort());
+ Assert.assertEquals(MRApp.NM_HTTP_PORT, tar.getNodeManagerHttpPort());
+ Assert.assertEquals(1, tar.getContainerId().getApplicationAttemptId()
+ .getAttemptId());
+ }
+
class MRAppWithClientService extends MRApp {
MRClientService clientService = null;
MRAppWithClientService(int maps, int reduces, boolean autoComplete) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
index 53e94db42b0..9dd877b3301 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRMContainerAllocator.java
@@ -34,6 +34,7 @@ import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.MRJobConfig;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
@@ -44,6 +45,7 @@ import org.apache.hadoop.mapreduce.v2.app.client.ClientService;
import org.apache.hadoop.mapreduce.v2.app.job.Job;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptContainerAssignedEvent;
import org.apache.hadoop.mapreduce.v2.app.job.impl.JobImpl;
+import org.apache.hadoop.mapreduce.v2.app.rm.ContainerFailedEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.ContainerRequestEvent;
import org.apache.hadoop.mapreduce.v2.app.rm.RMContainerAllocator;
import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
@@ -115,8 +117,8 @@ public class TestRMContainerAllocator {
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
- MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
- 0, 0, 0, 0, 0, 0, "jobfile"));
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+ 0, 0, 0, 0, 0, 0, "jobfile", null));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
@@ -192,8 +194,8 @@ public class TestRMContainerAllocator {
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
- MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
- 0, 0, 0, 0, 0, 0, "jobfile"));
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+ 0, 0, 0, 0, 0, 0, "jobfile", null));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
@@ -258,8 +260,8 @@ public class TestRMContainerAllocator {
JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
Job mockJob = mock(Job.class);
when(mockJob.getReport()).thenReturn(
- MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING,
- 0, 0, 0, 0, 0, 0, "jobfile"));
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+ 0, 0, 0, 0, 0, 0, "jobfile", null));
MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
appAttemptId, mockJob);
@@ -340,10 +342,10 @@ public class TestRMContainerAllocator {
public FakeJob(ApplicationAttemptId appAttemptID, Configuration conf,
int numMaps, int numReduces) {
- super(appAttemptID, conf, null, null, null, null, null, null, null,
- null);
- this.jobId = MRBuilderUtils
- .newJobId(appAttemptID.getApplicationId(), 0);
+ super(MRBuilderUtils.newJobId(appAttemptID.getApplicationId(), 0),
+ appAttemptID, conf, null, null, null, null, null, null, null, null,
+ true, null, System.currentTimeMillis(), null);
+ this.jobId = getID();
this.numMaps = numMaps;
this.numReduces = numReduces;
}
@@ -372,8 +374,8 @@ public class TestRMContainerAllocator {
@Override
public JobReport getReport() {
return MRBuilderUtils.newJobReport(this.jobId, "job", "user",
- JobState.RUNNING, 0, 0, this.setupProgress, this.mapProgress,
- this.reduceProgress, this.cleanupProgress, "jobfile");
+ JobState.RUNNING, 0, 0, 0, this.setupProgress, this.mapProgress,
+ this.reduceProgress, this.cleanupProgress, "jobfile", null);
}
}
@@ -478,6 +480,106 @@ public class TestRMContainerAllocator {
Assert.assertEquals(100.0f, app.getProgress(), 0.0);
}
+ @Test
+ public void testBlackListedNodes() throws Exception {
+
+ LOG.info("Running testBlackListedNodes");
+
+ Configuration conf = new Configuration();
+ conf.setBoolean(MRJobConfig.MR_AM_JOB_NODE_BLACKLISTING_ENABLE, true);
+ conf.setInt(MRJobConfig.MAX_TASK_FAILURES_PER_TRACKER, 1);
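+ // With the per-node failure threshold set to 1, a single failed attempt is
+ // enough to blacklist a node.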
+
+ MyResourceManager rm = new MyResourceManager(conf);
+ rm.start();
+ DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
+ .getDispatcher();
+
+ // Submit the application
+ RMApp app = rm.submitApp(1024);
+ dispatcher.await();
+
+ MockNM amNodeManager = rm.registerNode("amNM:1234", 2048);
+ amNodeManager.nodeHeartbeat(true);
+ dispatcher.await();
+
+ ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
+ .getAppAttemptId();
+ rm.sendAMLaunched(appAttemptId);
+ dispatcher.await();
+
+ JobId jobId = MRBuilderUtils.newJobId(appAttemptId.getApplicationId(), 0);
+ Job mockJob = mock(Job.class);
+ when(mockJob.getReport()).thenReturn(
+ MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
+ 0, 0, 0, 0, 0, 0, "jobfile", null));
+ MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
+ appAttemptId, mockJob);
+
+ // add resources to scheduler
+ MockNM nodeManager1 = rm.registerNode("h1:1234", 10240);
+ MockNM nodeManager2 = rm.registerNode("h2:1234", 10240);
+ MockNM nodeManager3 = rm.registerNode("h3:1234", 10240);
+ dispatcher.await();
+
+ // create the container request
+ ContainerRequestEvent event1 = createReq(jobId, 1, 1024,
+ new String[] { "h1" });
+ allocator.sendRequest(event1);
+
+ // send 1 more request with different resource req
+ ContainerRequestEvent event2 = createReq(jobId, 2, 1024,
+ new String[] { "h2" });
+ allocator.sendRequest(event2);
+
+ // send another request with different resource and priority
+ ContainerRequestEvent event3 = createReq(jobId, 3, 1024,
+ new String[] { "h3" });
+ allocator.sendRequest(event3);
+
+ // this tells the scheduler about the requests
+ // as nodes are not added, no allocations
+ List<TaskAttemptContainerAssignedEvent> assigned = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+ // Send events to blacklist nodes h1 and h2
+ ContainerFailedEvent f1 = createFailEvent(jobId, 1, "h1", false);
+ allocator.sendFailure(f1);
+ ContainerFailedEvent f2 = createFailEvent(jobId, 1, "h2", false);
+ allocator.sendFailure(f2);
+
+ // update resources in scheduler
+ nodeManager1.nodeHeartbeat(true); // Node heartbeat
+ nodeManager2.nodeHeartbeat(true); // Node heartbeat
+ dispatcher.await();
+
+ assigned = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+ // mark h1/h2 as bad nodes
+ nodeManager1.nodeHeartbeat(false);
+ nodeManager2.nodeHeartbeat(false);
+ dispatcher.await();
+
+ assigned = allocator.schedule();
+ dispatcher.await();
+ Assert.assertEquals("No of assignments must be 0", 0, assigned.size());
+
+ nodeManager3.nodeHeartbeat(true); // Node heartbeat
+ dispatcher.await();
+ assigned = allocator.schedule();
+ dispatcher.await();
+
+ Assert.assertTrue("No of assignments must be 3", assigned.size() == 3);
+
+ // validate that all containers are assigned to h3
+ for (TaskAttemptContainerAssignedEvent assig : assigned) {
+ Assert.assertTrue("Assigned container host not correct", "h3".equals(assig
+ .getContainer().getNodeId().getHost()));
+ }
+ }
+
private static class MyFifoScheduler extends FifoScheduler {
public MyFifoScheduler(RMContext rmContext) {
@@ -534,6 +636,19 @@ public class TestRMContainerAllocator {
new String[] { NetworkTopology.DEFAULT_RACK });
}
+ private ContainerFailedEvent createFailEvent(JobId jobId, int taskAttemptId,
+ String host, boolean reduce) {
+ TaskId taskId;
+ if (reduce) {
+ taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.REDUCE);
+ } else {
+ taskId = MRBuilderUtils.newTaskId(jobId, 0, TaskType.MAP);
+ }
+ TaskAttemptId attemptId = MRBuilderUtils.newTaskAttemptId(taskId,
+ taskAttemptId);
+ return new ContainerFailedEvent(attemptId, host);
+ }
+
private void checkAssignments(ContainerRequestEvent[] requests,
List assignments,
boolean checkHostMatch) {
@@ -653,6 +768,10 @@ public class TestRMContainerAllocator {
}
}
+ public void sendFailure(ContainerFailedEvent f) {
+ super.handle(f);
+ }
+
// API to be used by tests
public List schedule() {
// run the scheduler
@@ -672,6 +791,7 @@ public class TestRMContainerAllocator {
protected void startAllocatorThread() {
// override to NOT start thread
}
+
}
public static void main(String[] args) throws Exception {
@@ -681,5 +801,7 @@ public class TestRMContainerAllocator {
t.testMapReduceScheduling();
t.testReportedAppProgress();
t.testReportedAppProgressWithOnlyMaps();
+ t.testBlackListedNodes();
}
+
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
index 75db751480e..277b097da4f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRecovery.java
@@ -18,6 +18,9 @@
package org.apache.hadoop.mapreduce.v2.app;
+import java.io.File;
+import java.io.FileInputStream;
+import java.io.IOException;
import java.util.Iterator;
import junit.framework.Assert;
@@ -25,9 +28,21 @@ import junit.framework.Assert;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.OutputFormat;
+import org.apache.hadoop.mapreduce.RecordWriter;
+import org.apache.hadoop.mapreduce.TaskAttemptContext;
+import org.apache.hadoop.mapreduce.TypeConverter;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEventHandler;
+import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
+import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
+import org.apache.hadoop.mapreduce.task.TaskAttemptContextImpl;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
@@ -36,19 +51,35 @@ import org.apache.hadoop.mapreduce.v2.app.job.Task;
import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.mapreduce.v2.app.launcher.ContainerLauncher;
+import org.apache.hadoop.util.ReflectionUtils;
import org.apache.hadoop.yarn.event.EventHandler;
import org.junit.Test;
public class TestRecovery {
private static final Log LOG = LogFactory.getLog(TestRecovery.class);
+ private static Path outputDir = new Path(new File("target",
+ TestRecovery.class.getName()).getAbsolutePath() +
+ Path.SEPARATOR + "out");
+ private static String partFile = "part-r-00000";
+ private Text key1 = new Text("key1");
+ private Text key2 = new Text("key2");
+ private Text val1 = new Text("val1");
+ private Text val2 = new Text("val2");
+
@Test
public void testCrashed() throws Exception {
+
int runCount = 0;
+ long am1StartTimeEst = System.currentTimeMillis();
MRApp app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), true, ++runCount);
Configuration conf = new Configuration();
+ conf.setBoolean("mapred.mapper.new-api", true);
+ conf.setBoolean("mapred.reducer.new-api", true);
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
Job job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
long jobStartTime = job.getReport().getStartTime();
@@ -126,12 +157,16 @@ public class TestRecovery {
//stop the app
app.stop();
-
+
//rerun
//in rerun the 1st map will be recovered from previous run
+ long am2StartTimeEst = System.currentTimeMillis();
app = new MRAppWithHistory(2, 1, false, this.getClass().getName(), false, ++runCount);
conf = new Configuration();
conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
+ conf.setBoolean("mapred.mapper.new-api", true);
+ conf.setBoolean("mapred.reducer.new-api", true);
+ conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
job = app.submit(conf);
app.waitForState(job, JobState.RUNNING);
@@ -178,14 +213,207 @@ public class TestRecovery {
task1StartTime, mapTask1.getReport().getStartTime());
Assert.assertEquals("Task Finish time not correct",
task1FinishTime, mapTask1.getReport().getFinishTime());
+ Assert.assertEquals(2, job.getAMInfos().size());
+ int attemptNum = 1;
+ // Verify AMInfo
+ for (AMInfo amInfo : job.getAMInfos()) {
+ Assert.assertEquals(attemptNum++, amInfo.getAppAttemptId()
+ .getAttemptId());
+ Assert.assertEquals(amInfo.getAppAttemptId(), amInfo.getContainerId()
+ .getApplicationAttemptId());
+ Assert.assertEquals(MRApp.NM_HOST, amInfo.getNodeManagerHost());
+ Assert.assertEquals(MRApp.NM_PORT, amInfo.getNodeManagerPort());
+ Assert.assertEquals(MRApp.NM_HTTP_PORT, amInfo.getNodeManagerHttpPort());
+ }
+ long am1StartTimeReal = job.getAMInfos().get(0).getStartTime();
+ long am2StartTimeReal = job.getAMInfos().get(1).getStartTime();
+ Assert.assertTrue(am1StartTimeReal >= am1StartTimeEst
+ && am1StartTimeReal <= am2StartTimeEst);
+ Assert.assertTrue(am2StartTimeReal >= am2StartTimeEst
+ && am2StartTimeReal <= System.currentTimeMillis());
+ // TODO Add verification of additional data from jobHistory - whatever was
+ // available in the failed attempt should be available here
}
+ @Test
+ public void testOutputRecovery() throws Exception {
+ int runCount = 0;
+ MRApp app = new MRAppWithHistory(1, 2, false, this.getClass().getName(),
+ true, ++runCount);
+ Configuration conf = new Configuration();
+ conf.setBoolean("mapred.mapper.new-api", true);
+ conf.setBoolean("mapred.reducer.new-api", true);
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+ Assert.assertEquals("No of tasks not correct",
+ 3, job.getTasks().size());
+ Iterator<Task> it = job.getTasks().values().iterator();
+ Task mapTask1 = it.next();
+ Task reduceTask1 = it.next();
+
+ // all maps must be running
+ app.waitForState(mapTask1, TaskState.RUNNING);
+
+ TaskAttempt task1Attempt1 = mapTask1.getAttempts().values().iterator()
+ .next();
+
+ //before sending the TA_DONE event, make sure the attempt has come to
+ //RUNNING state
+ app.waitForState(task1Attempt1, TaskAttemptState.RUNNING);
+
+ //send the done signal to the map
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ task1Attempt1.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ //wait for map task to complete
+ app.waitForState(mapTask1, TaskState.SUCCEEDED);
+
+ // Verify the shuffle-port
+ Assert.assertEquals(5467, task1Attempt1.getShufflePort());
+
+ app.waitForState(reduceTask1, TaskState.RUNNING);
+ TaskAttempt reduce1Attempt1 = reduceTask1.getAttempts().values().iterator().next();
+
+ // write output corresponding to reduce1
+ writeOutput(reduce1Attempt1, conf);
+
+ //send the done signal to the 1st reduce
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ reduce1Attempt1.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ //wait for first reduce task to complete
+ app.waitForState(reduceTask1, TaskState.SUCCEEDED);
+
+ //stop the app before the job completes.
+ app.stop();
+
+ //rerun
+ //in rerun the map will be recovered from previous run
+ app = new MRAppWithHistory(1, 2, false, this.getClass().getName(), false,
+ ++runCount);
+ conf = new Configuration();
+ conf.setBoolean(MRJobConfig.MR_AM_JOB_RECOVERY_ENABLE, true);
+ conf.setBoolean("mapred.mapper.new-api", true);
+ conf.setBoolean("mapred.reducer.new-api", true);
+ conf.set(FileOutputFormat.OUTDIR, outputDir.toString());
+ conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
+ job = app.submit(conf);
+ app.waitForState(job, JobState.RUNNING);
+ Assert.assertEquals("No of tasks not correct",
+ 3, job.getTasks().size());
+ it = job.getTasks().values().iterator();
+ mapTask1 = it.next();
+ reduceTask1 = it.next();
+ Task reduceTask2 = it.next();
+
+ // map will be recovered, no need to send done
+ app.waitForState(mapTask1, TaskState.SUCCEEDED);
+
+ // Verify the shuffle-port after recovery
+ task1Attempt1 = mapTask1.getAttempts().values().iterator().next();
+ Assert.assertEquals(5467, task1Attempt1.getShufflePort());
+
+ // first reduce will be recovered, no need to send done
+ app.waitForState(reduceTask1, TaskState.SUCCEEDED);
+
+ app.waitForState(reduceTask2, TaskState.RUNNING);
+
+ TaskAttempt reduce2Attempt = reduceTask2.getAttempts().values()
+ .iterator().next();
+ //before sending the TA_DONE event, make sure the attempt has come to
+ //RUNNING state
+ app.waitForState(reduce2Attempt, TaskAttemptState.RUNNING);
+
+ //send the done signal to the 2nd reduce task
+ app.getContext().getEventHandler().handle(
+ new TaskAttemptEvent(
+ reduce2Attempt.getID(),
+ TaskAttemptEventType.TA_DONE));
+
+ //wait to get it completed
+ app.waitForState(reduceTask2, TaskState.SUCCEEDED);
+
+ app.waitForState(job, JobState.SUCCEEDED);
+ app.verifyCompleted();
+ validateOutput();
+ }
+
+ private void writeOutput(TaskAttempt attempt, Configuration conf)
+ throws Exception {
+ TaskAttemptContext tContext = new TaskAttemptContextImpl(conf,
+ TypeConverter.fromYarn(attempt.getID()));
+
+ TextOutputFormat<?, ?> theOutputFormat = new TextOutputFormat();
+ RecordWriter theRecordWriter = theOutputFormat
+ .getRecordWriter(tContext);
+
+ NullWritable nullWritable = NullWritable.get();
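+ // TextOutputFormat skips null/NullWritable keys and values, so only the
+ // non-null parts of each record end up in the part file that
+ // validateOutput() checks below.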
+ try {
+ theRecordWriter.write(key1, val1);
+ theRecordWriter.write(null, nullWritable);
+ theRecordWriter.write(null, val1);
+ theRecordWriter.write(nullWritable, val2);
+ theRecordWriter.write(key2, nullWritable);
+ theRecordWriter.write(key1, null);
+ theRecordWriter.write(null, null);
+ theRecordWriter.write(key2, val2);
+ } finally {
+ theRecordWriter.close(tContext);
+ }
+
+ OutputFormat outputFormat = ReflectionUtils.newInstance(
+ tContext.getOutputFormatClass(), conf);
+ OutputCommitter committer = outputFormat.getOutputCommitter(tContext);
+ committer.commitTask(tContext);
+ }
+
+ private void validateOutput() throws IOException {
+ File expectedFile = new File(new Path(outputDir, partFile).toString());
+ StringBuffer expectedOutput = new StringBuffer();
+ expectedOutput.append(key1).append('\t').append(val1).append("\n");
+ expectedOutput.append(val1).append("\n");
+ expectedOutput.append(val2).append("\n");
+ expectedOutput.append(key2).append("\n");
+ expectedOutput.append(key1).append("\n");
+ expectedOutput.append(key2).append('\t').append(val2).append("\n");
+ String output = slurp(expectedFile);
+ Assert.assertEquals(output, expectedOutput.toString());
+ }
+
+ public static String slurp(File f) throws IOException {
+ int len = (int) f.length();
+ byte[] buf = new byte[len];
+ FileInputStream in = new FileInputStream(f);
+ String contents = null;
+ try {
+ in.read(buf, 0, len);
+ contents = new String(buf, "UTF-8");
+ } finally {
+ in.close();
+ }
+ return contents;
+ }
+
+
class MRAppWithHistory extends MRApp {
public MRAppWithHistory(int maps, int reduces, boolean autoComplete,
String testName, boolean cleanOnStart, int startCount) {
super(maps, reduces, autoComplete, testName, cleanOnStart, startCount);
}
+ @Override
+ protected ContainerLauncher createContainerLauncher(AppContext context) {
+ MockContainerLauncher launcher = new MockContainerLauncher();
+ launcher.shufflePort = 5467;
+ return launcher;
+ }
+
@Override
protected EventHandler createJobHistoryHandler(
AppContext context) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
index ca1aa14ec5f..5669070deb2 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestRuntimeEstimators.java
@@ -33,6 +33,7 @@ import java.util.concurrent.atomic.AtomicLong;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.JobACL;
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.Counters;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
@@ -473,6 +474,11 @@ public class TestRuntimeEstimators {
  public Map<JobACL, AccessControlList> getJobACLs() {
throw new UnsupportedOperationException("Not supported yet.");
}
+
+ @Override
+ public List<AMInfo> getAMInfos() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
}
/*
@@ -682,6 +688,11 @@ public class TestRuntimeEstimators {
public String getNodeHttpAddress() {
throw new UnsupportedOperationException("Not supported yet.");
}
+
+ @Override
+ public String getNodeRackName() {
+ throw new UnsupportedOperationException("Not supported yet.");
+ }
@Override
public long getLaunchTime() {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
new file mode 100644
index 00000000000..5146acb5993
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/TestStagingCleanup.java
@@ -0,0 +1,105 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.app;
+
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyBoolean;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.verify;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+
+import junit.framework.TestCase;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.app.job.event.JobFinishEvent;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.util.BuilderUtils;
+import org.junit.Test;
+
+
+/**
+ * Make sure that the job staging directory clean up happens.
+ */
+ public class TestStagingCleanup extends TestCase {
+
+ private Configuration conf = new Configuration();
+ private FileSystem fs;
+ private String stagingJobDir = "tmpJobDir";
+ private Path stagingJobPath = new Path(stagingJobDir);
+ private final static RecordFactory recordFactory = RecordFactoryProvider.
+ getRecordFactory(null);
+ private static final Log LOG = LogFactory.getLog(TestStagingCleanup.class);
+
+ @Test
+ public void testDeletionofStaging() throws IOException {
+ conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, stagingJobDir);
+ fs = mock(FileSystem.class);
+ when(fs.delete(any(Path.class), anyBoolean())).thenReturn(true);
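+ // Build the minimal application-attempt and job ids that the JobFinishEvent
+ // handler needs; the staging path itself comes from MAPREDUCE_JOB_DIR set
+ // above, and the mocked FileSystem records the delete() call for verify().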
+ ApplicationAttemptId attemptId = recordFactory.newRecordInstance(
+ ApplicationAttemptId.class);
+ attemptId.setAttemptId(0);
+ ApplicationId appId = recordFactory.newRecordInstance(ApplicationId.class);
+ appId.setClusterTimestamp(System.currentTimeMillis());
+ appId.setId(0);
+ attemptId.setApplicationId(appId);
+ JobId jobid = recordFactory.newRecordInstance(JobId.class);
+ jobid.setAppId(appId);
+ MRAppMaster appMaster = new TestMRApp(attemptId);
+ EventHandler<JobFinishEvent> handler =
+ appMaster.createJobFinishEventHandler();
+ handler.handle(new JobFinishEvent(jobid));
+ verify(fs).delete(stagingJobPath, true);
+ }
+
+ private class TestMRApp extends MRAppMaster {
+
+ public TestMRApp(ApplicationAttemptId applicationAttemptId) {
+ super(applicationAttemptId, BuilderUtils.newContainerId(
+ applicationAttemptId, 1), "testhost", 2222, 3333, System
+ .currentTimeMillis());
+ }
+
+ @Override
+ protected FileSystem getFileSystem(Configuration conf) {
+ return fs;
+ }
+
+ @Override
+ protected void sysexit() {
+ }
+
+ @Override
+ public Configuration getConfig() {
+ return conf;
+ }
+ }
+
+ }
\ No newline at end of file
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
new file mode 100644
index 00000000000..e052b2527c0
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskAttempt.java
@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import java.util.Iterator;
+import java.util.Map;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.jobhistory.JobHistoryEvent;
+import org.apache.hadoop.mapreduce.jobhistory.TaskAttemptUnsuccessfulCompletion;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptReport;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.app.AppContext;
+import org.apache.hadoop.mapreduce.v2.app.MRApp;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptDiagnosticsUpdateEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskAttemptEventType;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.junit.Test;
+
+public class TestTaskAttempt{
+
+ @Test
+ public void testMRAppHistoryForMap() throws Exception {
+ MRApp app = new FailingAttemptsMRApp(1, 0);
+ testMRAppHistory(app);
+ }
+
+ @Test
+ public void testMRAppHistoryForReduce() throws Exception {
+ MRApp app = new FailingAttemptsMRApp(0, 1);
+ testMRAppHistory(app);
+ }
+
+ private void testMRAppHistory(MRApp app) throws Exception {
+ Configuration conf = new Configuration();
+ Job job = app.submit(conf);
+ app.waitForState(job, JobState.FAILED);
+ Map<TaskId, Task> tasks = job.getTasks();
+
+ Assert.assertEquals("Num tasks is not correct", 1, tasks.size());
+ Task task = tasks.values().iterator().next();
+ Assert.assertEquals("Task state not correct", TaskState.FAILED, task
+ .getReport().getTaskState());
+ Map<TaskAttemptId, TaskAttempt> attempts = tasks.values().iterator().next()
+ .getAttempts();
+ Assert.assertEquals("Num attempts is not correct", 4, attempts.size());
+
+ Iterator<TaskAttempt> it = attempts.values().iterator();
+ TaskAttemptReport report = it.next().getReport();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
+ report.getTaskAttemptState());
+ Assert.assertEquals("Diagnostic Information is not Correct",
+ "Test Diagnostic Event", report.getDiagnosticInfo());
+ report = it.next().getReport();
+ Assert.assertEquals("Attempt state not correct", TaskAttemptState.FAILED,
+ report.getTaskAttemptState());
+ }
+
+ static class FailingAttemptsMRApp extends MRApp {
+ FailingAttemptsMRApp(int maps, int reduces) {
+ super(maps, reduces, true, "FailingAttemptsMRApp", true);
+ }
+
+ @Override
+ protected void attemptLaunched(TaskAttemptId attemptID) {
+ getContext().getEventHandler().handle(
+ new TaskAttemptDiagnosticsUpdateEvent(attemptID,
+ "Test Diagnostic Event"));
+ getContext().getEventHandler().handle(
+ new TaskAttemptEvent(attemptID, TaskAttemptEventType.TA_FAILMSG));
+ }
+
+ protected EventHandler<JobHistoryEvent> createJobHistoryHandler(
+ AppContext context) {
+ return new EventHandler<JobHistoryEvent>() {
+ @Override
+ public void handle(JobHistoryEvent event) {
+ if (event.getType() == org.apache.hadoop.mapreduce.jobhistory.EventType.MAP_ATTEMPT_FAILED) {
+ TaskAttemptUnsuccessfulCompletion datum = (TaskAttemptUnsuccessfulCompletion) event
+ .getHistoryEvent().getDatum();
+ Assert.assertEquals("Diagnostic Information is not Correct",
+ "Test Diagnostic Event", datum.get(6).toString());
+ }
+ }
+ };
+ }
+ }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
new file mode 100644
index 00000000000..c30ee0a2532
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/java/org/apache/hadoop/mapreduce/v2/app/job/impl/TestTaskImpl.java
@@ -0,0 +1,398 @@
+package org.apache.hadoop.mapreduce.v2.app.job.impl;
+
+import static org.junit.Assert.*;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Collection;
+import java.util.List;
+import java.util.Set;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.Task;
+import org.apache.hadoop.mapred.TaskUmbilicalProtocol;
+import org.apache.hadoop.mapreduce.OutputCommitter;
+import org.apache.hadoop.mapreduce.security.token.JobTokenIdentifier;
+import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.app.TaskAttemptListener;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEvent;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskEventType;
+import org.apache.hadoop.mapreduce.v2.app.job.event.TaskTAttemptEvent;
+import org.apache.hadoop.mapreduce.v2.app.metrics.MRAppMetrics;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.yarn.Clock;
+import org.apache.hadoop.yarn.SystemClock;
+import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.server.resourcemanager.resourcetracker.InlineDispatcher;
+import org.apache.hadoop.yarn.util.Records;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+public class TestTaskImpl {
+
+ private static final Log LOG = LogFactory.getLog(TestTaskImpl.class);
+
+ private Configuration conf;
+ private TaskAttemptListener taskAttemptListener;
+ private OutputCommitter committer;
+ private Token<JobTokenIdentifier> jobToken;
+ private JobId jobId;
+ private Path remoteJobConfFile;
+ private Collection<Token<? extends TokenIdentifier>> fsTokens;
+ private Clock clock;
+ private Set<TaskId> completedTasksFromPreviousRun;
+ private MRAppMetrics metrics;
+ private TaskImpl mockTask;
+ private ApplicationId appId;
+ private TaskSplitMetaInfo taskSplitMetaInfo;
+ private String[] dataLocations = new String[0];
+ private final TaskType taskType = TaskType.MAP;
+
+ private int startCount = 0;
+ private int taskCounter = 0;
+ private final int partition = 1;
+
+ private InlineDispatcher dispatcher;
+ private List<MockTaskAttemptImpl> taskAttempts;
+
+ private class MockTaskImpl extends TaskImpl {
+
+ private int taskAttemptCounter = 0;
+
+ @SuppressWarnings("rawtypes")
+ public MockTaskImpl(JobId jobId, int partition,
+ EventHandler eventHandler, Path remoteJobConfFile, Configuration conf,
+ TaskAttemptListener taskAttemptListener, OutputCommitter committer,
+ Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock,
+ Set<TaskId> completedTasksFromPreviousRun, int startCount,
+ MRAppMetrics metrics) {
+ super(jobId, taskType , partition, eventHandler,
+ remoteJobConfFile, conf, taskAttemptListener, committer,
+ jobToken, fsTokens, clock,
+ completedTasksFromPreviousRun, startCount, metrics);
+ }
+
+ @Override
+ public TaskType getType() {
+ return taskType;
+ }
+
+ @Override
+ protected TaskAttemptImpl createAttempt() {
+ MockTaskAttemptImpl attempt = new MockTaskAttemptImpl(getID(), ++taskAttemptCounter,
+ eventHandler, taskAttemptListener, remoteJobConfFile, partition,
+ conf, committer, jobToken, fsTokens, clock);
+ taskAttempts.add(attempt);
+ return attempt;
+ }
+
+ @Override
+ protected int getMaxAttempts() {
+ return 100;
+ }
+
+ }
+
+ private class MockTaskAttemptImpl extends TaskAttemptImpl {
+
+ private float progress = 0;
+ private TaskAttemptState state = TaskAttemptState.NEW;
+ private TaskAttemptId attemptId;
+
+ @SuppressWarnings("rawtypes")
+ public MockTaskAttemptImpl(TaskId taskId, int id, EventHandler eventHandler,
+ TaskAttemptListener taskAttemptListener, Path jobFile, int partition,
+ Configuration conf, OutputCommitter committer,
+ Token<JobTokenIdentifier> jobToken,
+ Collection<Token<? extends TokenIdentifier>> fsTokens, Clock clock) {
+ super(taskId, id, eventHandler, taskAttemptListener, jobFile, partition, conf,
+ dataLocations, committer, jobToken, fsTokens, clock);
+ attemptId = Records.newRecord(TaskAttemptId.class);
+ attemptId.setId(id);
+ attemptId.setTaskId(taskId);
+ }
+
+ public TaskAttemptId getAttemptId() {
+ return attemptId;
+ }
+
+ @Override
+ protected Task createRemoteTask() {
+ return new MockTask();
+ }
+
+ public float getProgress() {
+ return progress ;
+ }
+
+ public void setProgress(float progress) {
+ this.progress = progress;
+ }
+
+ public void setState(TaskAttemptState state) {
+ this.state = state;
+ }
+
+ public TaskAttemptState getState() {
+ return state;
+ }
+
+ }
+
+ private class MockTask extends Task {
+
+ @Override
+ @SuppressWarnings("deprecation")
+ public void run(JobConf job, TaskUmbilicalProtocol umbilical)
+ throws IOException, ClassNotFoundException, InterruptedException {
+ return;
+ }
+
+ @Override
+ public boolean isMapTask() {
+ return true;
+ }
+
+ }
+
+ @Before
+ @SuppressWarnings("unchecked")
+ public void setup() {
+ dispatcher = new InlineDispatcher();
+
+ ++startCount;
+
+ conf = new Configuration();
+ taskAttemptListener = mock(TaskAttemptListener.class);
+ committer = mock(OutputCommitter.class);
+ jobToken = (Token) mock(Token.class);
+ remoteJobConfFile = mock(Path.class);
+ fsTokens = null;
+ clock = new SystemClock();
+ metrics = mock(MRAppMetrics.class);
+ dataLocations = new String[1];
+
+ appId = Records.newRecord(ApplicationId.class);
+ appId.setClusterTimestamp(System.currentTimeMillis());
+ appId.setId(1);
+
+ jobId = Records.newRecord(JobId.class);
+ jobId.setId(1);
+ jobId.setAppId(appId);
+
+ taskSplitMetaInfo = mock(TaskSplitMetaInfo.class);
+ when(taskSplitMetaInfo.getLocations()).thenReturn(dataLocations);
+
+ taskAttempts = new ArrayList<MockTaskAttemptImpl>();
+
+ mockTask = new MockTaskImpl(jobId, partition, dispatcher.getEventHandler(),
+ remoteJobConfFile, conf, taskAttemptListener, committer, jobToken,
+ fsTokens, clock,
+ completedTasksFromPreviousRun, startCount,
+ metrics);
+
+ }
+
+ @After
+ public void teardown() {
+ taskAttempts.clear();
+ }
+
+ private TaskId getNewTaskID() {
+ TaskId taskId = Records.newRecord(TaskId.class);
+ taskId.setId(++taskCounter);
+ taskId.setJobId(jobId);
+ taskId.setTaskType(mockTask.getType());
+ return taskId;
+ }
+
+ private void scheduleTaskAttempt(TaskId taskId) {
+ mockTask.handle(new TaskEvent(taskId,
+ TaskEventType.T_SCHEDULE));
+ assertTaskScheduledState();
+ }
+
+ private void killTask(TaskId taskId) {
+ mockTask.handle(new TaskEvent(taskId,
+ TaskEventType.T_KILL));
+ assertTaskKillWaitState();
+ }
+
+ private void killScheduledTaskAttempt(TaskAttemptId attemptId) {
+ mockTask.handle(new TaskTAttemptEvent(attemptId,
+ TaskEventType.T_ATTEMPT_KILLED));
+ assertTaskScheduledState();
+ }
+
+ private void launchTaskAttempt(TaskAttemptId attemptId) {
+ mockTask.handle(new TaskTAttemptEvent(attemptId,
+ TaskEventType.T_ATTEMPT_LAUNCHED));
+ assertTaskRunningState();
+ }
+
+ private MockTaskAttemptImpl getLastAttempt() {
+ return taskAttempts.get(taskAttempts.size()-1);
+ }
+
+ private void updateLastAttemptProgress(float p) {
+ getLastAttempt().setProgress(p);
+ }
+
+ private void updateLastAttemptState(TaskAttemptState s) {
+ getLastAttempt().setState(s);
+ }
+
+ private void killRunningTaskAttempt(TaskAttemptId attemptId) {
+ mockTask.handle(new TaskTAttemptEvent(attemptId,
+ TaskEventType.T_ATTEMPT_KILLED));
+ assertTaskRunningState();
+ }
+
+ /**
+ * {@link TaskState#NEW}
+ */
+ private void assertTaskNewState() {
+ assertEquals(mockTask.getState(), TaskState.NEW);
+ }
+
+ /**
+ * {@link TaskState#SCHEDULED}
+ */
+ private void assertTaskScheduledState() {
+ assertEquals(mockTask.getState(), TaskState.SCHEDULED);
+ }
+
+ /**
+ * {@link TaskState#RUNNING}
+ */
+ private void assertTaskRunningState() {
+ assertEquals(mockTask.getState(), TaskState.RUNNING);
+ }
+
+ /**
+ * {@link TaskState#KILL_WAIT}
+ */
+ private void assertTaskKillWaitState() {
+ assertEquals(mockTask.getState(), TaskState.KILL_WAIT);
+ }
+
+ @Test
+ public void testInit() {
+ LOG.info("--- START: testInit ---");
+ assertTaskNewState();
+ assert(taskAttempts.size() == 0);
+ }
+
+ @Test
+ /**
+ * {@link TaskState#NEW}->{@link TaskState#SCHEDULED}
+ */
+ public void testScheduleTask() {
+ LOG.info("--- START: testScheduleTask ---");
+ TaskId taskId = getNewTaskID();
+ scheduleTaskAttempt(taskId);
+ }
+
+ @Test
+ /**
+ * {@link TaskState#SCHEDULED}->{@link TaskState#KILL_WAIT}
+ */
+ public void testKillScheduledTask() {
+ LOG.info("--- START: testKillScheduledTask ---");
+ TaskId taskId = getNewTaskID();
+ scheduleTaskAttempt(taskId);
+ killTask(taskId);
+ }
+
+ @Test
+ /**
+ * Kill attempt
+ * {@link TaskState#SCHEDULED}->{@link TaskState#SCHEDULED}
+ */
+ public void testKillScheduledTaskAttempt() {
+ LOG.info("--- START: testKillScheduledTaskAttempt ---");
+ TaskId taskId = getNewTaskID();
+ scheduleTaskAttempt(taskId);
+ killScheduledTaskAttempt(getLastAttempt().getAttemptId());
+ }
+
+ @Test
+ /**
+ * Launch attempt
+ * {@link TaskState#SCHEDULED}->{@link TaskState#RUNNING}
+ */
+ public void testLaunchTaskAttempt() {
+ LOG.info("--- START: testLaunchTaskAttempt ---");
+ TaskId taskId = getNewTaskID();
+ scheduleTaskAttempt(taskId);
+ launchTaskAttempt(getLastAttempt().getAttemptId());
+ }
+
+ @Test
+ /**
+ * Kill running attempt
+ * {@link TaskState#RUNNING}->{@link TaskState#RUNNING}
+ */
+ public void testKillRunningTaskAttempt() {
+ LOG.info("--- START: testKillRunningTaskAttempt ---");
+ TaskId taskId = getNewTaskID();
+ scheduleTaskAttempt(taskId);
+ launchTaskAttempt(getLastAttempt().getAttemptId());
+ killRunningTaskAttempt(getLastAttempt().getAttemptId());
+ }
+
+ @Test
+ public void testTaskProgress() {
+ LOG.info("--- START: testTaskProgress ---");
+
+ // launch task
+ TaskId taskId = getNewTaskID();
+ scheduleTaskAttempt(taskId);
+ float progress = 0f;
+ assert(mockTask.getProgress() == progress);
+ launchTaskAttempt(getLastAttempt().getAttemptId());
+
+ // update attempt1
+ progress = 50f;
+ updateLastAttemptProgress(progress);
+ assert(mockTask.getProgress() == progress);
+ progress = 100f;
+ updateLastAttemptProgress(progress);
+ assert(mockTask.getProgress() == progress);
+
+ progress = 0f;
+ // mark first attempt as killed
+ updateLastAttemptState(TaskAttemptState.KILLED);
+ assert(mockTask.getProgress() == progress);
+
+ // kill first attempt
+ // should trigger a new attempt
+ // as no successful attempts
+ killRunningTaskAttempt(getLastAttempt().getAttemptId());
+ assert(taskAttempts.size() == 2);
+
+ assert(mockTask.getProgress() == 0f);
+ launchTaskAttempt(getLastAttempt().getAttemptId());
+ progress = 50f;
+ updateLastAttemptProgress(progress);
+ assert(mockTask.getProgress() == progress);
+
+ }
+
+}
diff --git a/hadoop-mapreduce-project/src/contrib/dynamic-scheduler/ivy/libraries.properties b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
similarity index 64%
rename from hadoop-mapreduce-project/src/contrib/dynamic-scheduler/ivy/libraries.properties
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
index 8a80dd81a99..531b68b5a9f 100644
--- a/hadoop-mapreduce-project/src/contrib/dynamic-scheduler/ivy/libraries.properties
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/test/resources/log4j.properties
@@ -10,8 +10,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-#This properties file lists the versions of the various artifacts used by streaming.
-#It drives ivy and the generation of a maven POM
+# log4j configuration used during build and unit tests
-#Please list the dependencies name with version if they are different from the ones
-#listed in the global libraries.properties file (in alphabetical order)
+log4j.rootLogger=info,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
index d367a9ead20..cb199ac70a9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml
@@ -16,16 +16,17 @@
hadoop-mapreduce-clientorg.apache.hadoop
- ${hadoop-mapreduce.version}
+ 0.24.0-SNAPSHOT4.0.0org.apache.hadoophadoop-mapreduce-client-common
+ 0.24.0-SNAPSHOThadoop-mapreduce-client-common
- ${project.artifact.file}
- ${project.parent.parent.basedir}
+
+ ${project.parent.basedir}/../
diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
similarity index 96%
rename from hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
index d09b222ee9b..2b6ad992ceb 100644
--- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalClientProtocolProvider.java
@@ -34,7 +34,7 @@ public class LocalClientProtocolProvider extends ClientProtocolProvider {
@Override
public ClientProtocol create(Configuration conf) throws IOException {
String framework = conf.get(MRConfig.FRAMEWORK_NAME);
- if (framework != null && !framework.equals("local")) {
+ if (framework != null && !framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME)) {
return null;
}
String tracker = conf.get(JTConfig.JT_IPC_ADDRESS, "local");
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
new file mode 100644
index 00000000000..19f558c6726
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalDistributedCacheManager.java
@@ -0,0 +1,215 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapred;
+
+import com.google.common.collect.Maps;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.MalformedURLException;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.net.URLClassLoader;
+import java.security.AccessController;
+import java.security.PrivilegedAction;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.LinkedHashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.LocalDirAllocator;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapreduce.MRConfig;
+import org.apache.hadoop.mapreduce.MRJobConfig;
+import org.apache.hadoop.mapreduce.filecache.DistributedCache;
+import org.apache.hadoop.mapreduce.v2.util.MRApps;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.api.records.LocalResource;
+import org.apache.hadoop.yarn.api.records.LocalResourceType;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+import org.apache.hadoop.yarn.util.FSDownload;
+
+/**
+ * A helper class for managing the distributed cache for {@link LocalJobRunner}.
+ */
+@SuppressWarnings("deprecation")
+class LocalDistributedCacheManager {
+ public static final Log LOG =
+ LogFactory.getLog(LocalDistributedCacheManager.class);
+
+ private List<String> localArchives = new ArrayList<String>();
+ private List<String> localFiles = new ArrayList<String>();
+ private List<String> localClasspaths = new ArrayList<String>();
+
+ private boolean setupCalled = false;
+
+ /**
+ * Set up the distributed cache by localizing the resources, and updating
+ * the configuration with references to the localized resources.
+ * @param conf
+ * @throws IOException
+ */
+ public void setup(JobConf conf) throws IOException {
+ // Generate YARN local resources objects corresponding to the distributed
+ // cache configuration
+ Map<String, LocalResource> localResources =
+ new LinkedHashMap<String, LocalResource>();
+ MRApps.setupDistributedCache(conf, localResources);
+
+ // Find which resources are to be put on the local classpath
+ Map<String, Path> classpaths = new HashMap<String, Path>();
+ Path[] archiveClassPaths = DistributedCache.getArchiveClassPaths(conf);
+ if (archiveClassPaths != null) {
+ for (Path p : archiveClassPaths) {
+ FileSystem remoteFS = p.getFileSystem(conf);
+ p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
+ remoteFS.getWorkingDirectory()));
+ classpaths.put(p.toUri().getPath().toString(), p);
+ }
+ }
+ Path[] fileClassPaths = DistributedCache.getFileClassPaths(conf);
+ if (fileClassPaths != null) {
+ for (Path p : fileClassPaths) {
+ FileSystem remoteFS = p.getFileSystem(conf);
+ p = remoteFS.resolvePath(p.makeQualified(remoteFS.getUri(),
+ remoteFS.getWorkingDirectory()));
+ classpaths.put(p.toUri().getPath().toString(), p);
+ }
+ }
+
+ // Localize the resources
+ LocalDirAllocator localDirAllocator =
+ new LocalDirAllocator(MRConfig.LOCAL_DIR);
+ FileContext localFSFileContext = FileContext.getLocalFSFileContext();
+ UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+
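+ // Submit every download to a thread pool via FSDownload and keep the
+ // Futures so the localized paths can be collected in order below.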
+ Map<LocalResource, Future<Path>> resourcesToPaths = Maps.newHashMap();
+ ExecutorService exec = Executors.newCachedThreadPool();
+ for (LocalResource resource : localResources.values()) {
+ Callable<Path> download = new FSDownload(localFSFileContext, ugi, conf,
+ localDirAllocator, resource, new Random());
+ Future<Path> future = exec.submit(download);
+ resourcesToPaths.put(resource, future);
+ }
+ for (LocalResource resource : localResources.values()) {
+ Path path;
+ try {
+ path = resourcesToPaths.get(resource).get();
+ } catch (InterruptedException e) {
+ throw new IOException(e);
+ } catch (ExecutionException e) {
+ throw new IOException(e);
+ }
+ String pathString = path.toUri().toString();
+ if (resource.getType() == LocalResourceType.ARCHIVE) {
+ localArchives.add(pathString);
+ } else if (resource.getType() == LocalResourceType.FILE) {
+ localFiles.add(pathString);
+ }
+ Path resourcePath;
+ try {
+ resourcePath = ConverterUtils.getPathFromYarnURL(resource.getResource());
+ } catch (URISyntaxException e) {
+ throw new IOException(e);
+ }
+ LOG.info(String.format("Localized %s as %s", resourcePath, path));
+ String cp = resourcePath.toUri().getPath();
+ if (classpaths.keySet().contains(cp)) {
+ localClasspaths.add(path.toUri().getPath().toString());
+ }
+ }
+
+ // Update the configuration object with localized data.
+ if (!localArchives.isEmpty()) {
+ conf.set(MRJobConfig.CACHE_LOCALARCHIVES, StringUtils
+ .arrayToString(localArchives.toArray(new String[localArchives
+ .size()])));
+ }
+ if (!localFiles.isEmpty()) {
+ conf.set(MRJobConfig.CACHE_LOCALFILES, StringUtils
+ .arrayToString(localFiles.toArray(new String[localFiles
+ .size()])));
+ }
+ if (DistributedCache.getSymlink(conf)) {
+ // This is not supported largely because,
+ // for a Child subprocess, the cwd in LocalJobRunner
+ // is not a fresh slate, but rather the user's working directory.
+ // This is further complicated because the logic in
+ // setupWorkDir only creates symlinks if there's a jarfile
+ // in the configuration.
+ LOG.warn("LocalJobRunner does not support " +
+ "symlinking into current working dir.");
+ }
+ setupCalled = true;
+ }
+
+ /**
+ * Are there resources that should be added to the classpath?
+ * Should be called after setup().
+ *
+ */
+ public boolean hasLocalClasspaths() {
+ if (!setupCalled) {
+ throw new IllegalStateException(
+ "hasLocalClasspaths() should be called after setup()");
+ }
+ return !localClasspaths.isEmpty();
+ }
+
+ /**
+ * Creates a class loader that includes the designated
+ * files and archives.
+ */
+ public ClassLoader makeClassLoader(final ClassLoader parent)
+ throws MalformedURLException {
+ final URL[] urls = new URL[localClasspaths.size()];
+ for (int i = 0; i < localClasspaths.size(); ++i) {
+ urls[i] = new File(localClasspaths.get(i)).toURI().toURL();
+ LOG.info(urls[i]);
+ }
+ return AccessController.doPrivileged(new PrivilegedAction<ClassLoader>() {
+ @Override
+ public ClassLoader run() {
+ return new URLClassLoader(urls, parent);
+ }
+ });
+ }
+
+ public void close() throws IOException {
+ FileContext localFSFileContext = FileContext.getLocalFSFileContext();
+ for (String archive : localArchives) {
+ localFSFileContext.delete(new Path(archive), true);
+ }
+ for (String file : localFiles) {
+ localFSFileContext.delete(new Path(file), true);
+ }
+ }
+}
diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LocalJobRunner.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
similarity index 91%
rename from hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
index 4d05e406177..c8b59ebdac3 100644
--- a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/LocalJobRunner.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunner.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.mapred;
-import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.util.ArrayList;
@@ -27,8 +26,8 @@ import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;
-import java.util.concurrent.Executors;
import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
@@ -38,27 +37,23 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.LocalDirAllocator;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.ipc.ProtocolSignature;
+import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
import org.apache.hadoop.mapreduce.ClusterMetrics;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.mapreduce.QueueInfo;
import org.apache.hadoop.mapreduce.TaskCompletionEvent;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapreduce.Cluster.JobTrackerStatus;
-import org.apache.hadoop.mapreduce.filecache.DistributedCache;
-import org.apache.hadoop.mapreduce.filecache.TaskDistributedCacheManager;
-import org.apache.hadoop.mapreduce.filecache.TrackerDistributedCacheManager;
import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
-import org.apache.hadoop.mapreduce.server.jobtracker.State;
-import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
import org.apache.hadoop.mapreduce.split.JobSplit.TaskSplitMetaInfo;
+import org.apache.hadoop.mapreduce.split.SplitMetaInfoReader;
+import org.apache.hadoop.mapreduce.v2.LogParams;
+import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.authorize.AccessControlList;
import org.apache.hadoop.security.token.Token;
@@ -66,6 +61,7 @@ import org.apache.hadoop.security.token.Token;
/** Implements MapReduce locally, in-process, for debugging. */
@InterfaceAudience.Private
@InterfaceStability.Unstable
+@SuppressWarnings("deprecation")
public class LocalJobRunner implements ClientProtocol {
public static final Log LOG =
LogFactory.getLog(LocalJobRunner.class);
@@ -81,7 +77,7 @@ public class LocalJobRunner implements ClientProtocol {
private int reduce_tasks = 0;
final Random rand = new Random();
- private JobTrackerInstrumentation myMetrics = null;
+ private LocalJobRunnerMetrics myMetrics = null;
private static final String jobDir = "localRunner/";
@@ -124,8 +120,7 @@ public class LocalJobRunner implements ClientProtocol {
private FileSystem localFs;
boolean killed = false;
- private TrackerDistributedCacheManager trackerDistributerdCacheManager;
- private TaskDistributedCacheManager taskDistributedCacheManager;
+ private LocalDistributedCacheManager localDistributedCacheManager;
public long getProtocolVersion(String protocol, long clientVersion) {
return TaskUmbilicalProtocol.versionID;
@@ -149,27 +144,8 @@ public class LocalJobRunner implements ClientProtocol {
// Manage the distributed cache. If there are files to be copied,
// this will trigger localFile to be re-written again.
- this.trackerDistributerdCacheManager =
- new TrackerDistributedCacheManager(conf, new DefaultTaskController());
- this.taskDistributedCacheManager =
- trackerDistributerdCacheManager.newTaskDistributedCacheManager(conf);
- taskDistributedCacheManager.setup(
- new LocalDirAllocator(MRConfig.LOCAL_DIR),
- new File(systemJobDir.toString()),
- "archive", "archive");
-
- if (DistributedCache.getSymlink(conf)) {
- // This is not supported largely because,
- // for a Child subprocess, the cwd in LocalJobRunner
- // is not a fresh slate, but rather the user's working directory.
- // This is further complicated because the logic in
- // setupWorkDir only creates symlinks if there's a jarfile
- // in the configuration.
- LOG.warn("LocalJobRunner does not support " +
- "symlinking into current working dir.");
- }
- // Setup the symlinks for the distributed cache.
- TaskRunner.setupWorkDir(conf, new File(localJobDir.toUri()).getAbsoluteFile());
+ localDistributedCacheManager = new LocalDistributedCacheManager();
+ localDistributedCacheManager.setup(conf);
// Write out configuration file. Instead of copying it from
// systemJobFile, we re-write it, since setup(), above, may have
@@ -183,8 +159,8 @@ public class LocalJobRunner implements ClientProtocol {
this.job = new JobConf(localJobFile);
// Job (the current object) is a Thread, so we wrap its class loader.
- if (!taskDistributedCacheManager.getClassPaths().isEmpty()) {
- setContextClassLoader(taskDistributedCacheManager.makeClassLoader(
+ if (localDistributedCacheManager.hasLocalClasspaths()) {
+ setContextClassLoader(localDistributedCacheManager.makeClassLoader(
getContextClassLoader()));
}
@@ -199,10 +175,6 @@ public class LocalJobRunner implements ClientProtocol {
this.start();
}
- JobProfile getProfile() {
- return profile;
- }
-
/**
* A Runnable instance that handles a map task to be run by an executor.
*/
@@ -238,7 +210,7 @@ public class LocalJobRunner implements ClientProtocol {
info.getSplitIndex(), 1);
map.setUser(UserGroupInformation.getCurrentUser().
getShortUserName());
- TaskRunner.setupChildMapredLocalDirs(map, localConf);
+ setupChildMapredLocalDirs(map, localConf);
MapOutputFile mapOutput = new MROutputFiles();
mapOutput.setConf(localConf);
@@ -332,7 +304,6 @@ public class LocalJobRunner implements ClientProtocol {
return executor;
}
- @SuppressWarnings("unchecked")
@Override
public void run() {
JobID jobId = profile.getJobID();
@@ -398,7 +369,7 @@ public class LocalJobRunner implements ClientProtocol {
getShortUserName());
JobConf localConf = new JobConf(job);
localConf.set("mapreduce.jobtracker.address", "local");
- TaskRunner.setupChildMapredLocalDirs(reduce, localConf);
+ setupChildMapredLocalDirs(reduce, localConf);
// move map output to reduce input
for (int i = 0; i < mapIds.size(); i++) {
if (!this.isInterrupted()) {
@@ -472,8 +443,7 @@ public class LocalJobRunner implements ClientProtocol {
fs.delete(systemJobFile.getParent(), true); // delete submit dir
localFs.delete(localJobFile, true); // delete local copy
// Cleanup distributed cache
- taskDistributedCacheManager.release();
- trackerDistributerdCacheManager.purgeCache();
+ localDistributedCacheManager.close();
} catch (IOException e) {
LOG.warn("Error cleaning up "+id+": "+e);
}
@@ -592,7 +562,7 @@ public class LocalJobRunner implements ClientProtocol {
public LocalJobRunner(JobConf conf) throws IOException {
this.fs = FileSystem.getLocal(conf);
this.conf = conf;
- myMetrics = new JobTrackerMetricsInst(null, new JobConf(conf));
+ myMetrics = new LocalJobRunnerMetrics(new JobConf(conf));
}
// JobSubmissionProtocol methods
@@ -660,14 +630,6 @@ public class LocalJobRunner implements ClientProtocol {
reduce_tasks, 0, 0, 1, 1, jobs.size(), 1, 0, 0);
}
- /**
- * @deprecated Use {@link #getJobTrackerStatus()} instead.
- */
- @Deprecated
- public State getJobTrackerState() throws IOException, InterruptedException {
- return State.RUNNING;
- }
-
public JobTrackerStatus getJobTrackerStatus() {
return JobTrackerStatus.RUNNING;
}
@@ -722,7 +684,7 @@ public class LocalJobRunner implements ClientProtocol {
}
/**
- * @see org.apache.hadoop.mapred.JobSubmissionProtocol#getQueueAdmins()
+ * @see org.apache.hadoop.mapreduce.protocol.ClientProtocol#getQueueAdmins(String)
*/
public AccessControlList getQueueAdmins(String queueName) throws IOException {
return new AccessControlList(" ");// no queue admins for local job runner
@@ -812,4 +774,44 @@ public class LocalJobRunner implements ClientProtocol {
) throws IOException,InterruptedException{
return 0;
}
+
+ @Override
+ public LogParams getLogFileParams(org.apache.hadoop.mapreduce.JobID jobID,
+ org.apache.hadoop.mapreduce.TaskAttemptID taskAttemptID)
+ throws IOException, InterruptedException {
+ throw new UnsupportedOperationException("Not supported");
+ }
+
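+ // Builds the per-task local dir list (one subdirectory under each configured
+ // local dir) and rewrites MRConfig.LOCAL_DIR so the child task uses it.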
+ static void setupChildMapredLocalDirs(Task t, JobConf conf) {
+ String[] localDirs = conf.getTrimmedStrings(MRConfig.LOCAL_DIR);
+ String jobId = t.getJobID().toString();
+ String taskId = t.getTaskID().toString();
+ boolean isCleanup = t.isTaskCleanupTask();
+ String user = t.getUser();
+ StringBuffer childMapredLocalDir =
+ new StringBuffer(localDirs[0] + Path.SEPARATOR
+ + getLocalTaskDir(user, jobId, taskId, isCleanup));
+ for (int i = 1; i < localDirs.length; i++) {
+ childMapredLocalDir.append("," + localDirs[i] + Path.SEPARATOR
+ + getLocalTaskDir(user, jobId, taskId, isCleanup));
+ }
+ LOG.debug(MRConfig.LOCAL_DIR + " for child : " + childMapredLocalDir);
+ conf.set(MRConfig.LOCAL_DIR, childMapredLocalDir.toString());
+ }
+
+ static final String TASK_CLEANUP_SUFFIX = ".cleanup";
+ static final String SUBDIR = jobDir;
+ static final String JOBCACHE = "jobcache";
+
+ static String getLocalTaskDir(String user, String jobid, String taskid,
+ boolean isCleanupAttempt) {
+ String taskDir = SUBDIR + Path.SEPARATOR + user + Path.SEPARATOR + JOBCACHE
+ + Path.SEPARATOR + jobid + Path.SEPARATOR + taskid;
+ if (isCleanupAttempt) {
+ taskDir = taskDir + TASK_CLEANUP_SUFFIX;
+ }
+ return taskDir;
+ }
+
+
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunnerMetrics.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunnerMetrics.java
new file mode 100644
index 00000000000..aec70edefc2
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapred/LocalJobRunnerMetrics.java
@@ -0,0 +1,98 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import org.apache.hadoop.metrics.MetricsContext;
+import org.apache.hadoop.metrics.MetricsRecord;
+import org.apache.hadoop.metrics.MetricsUtil;
+import org.apache.hadoop.metrics.Updater;
+import org.apache.hadoop.metrics.jvm.JvmMetrics;
+
+@SuppressWarnings("deprecation")
+class LocalJobRunnerMetrics implements Updater {
+ private final MetricsRecord metricsRecord;
+
+ private int numMapTasksLaunched = 0;
+ private int numMapTasksCompleted = 0;
+ private int numReduceTasksLaunched = 0;
+ private int numReduceTasksCompleted = 0;
+ private int numWaitingMaps = 0;
+ private int numWaitingReduces = 0;
+
+ public LocalJobRunnerMetrics(JobConf conf) {
+ String sessionId = conf.getSessionId();
+ // Initiate JVM Metrics
+ JvmMetrics.init("JobTracker", sessionId);
+ // Create a record for map-reduce metrics
+ MetricsContext context = MetricsUtil.getContext("mapred");
+ // record name is jobtracker for compatibility
+ metricsRecord = MetricsUtil.createRecord(context, "jobtracker");
+ metricsRecord.setTag("sessionId", sessionId);
+ context.registerUpdater(this);
+ }
+
+ /**
+ * Since this object is a registered updater, this method will be called
+ * periodically, e.g. every 5 seconds.
+ */
+ public void doUpdates(MetricsContext unused) {
+ synchronized (this) {
+ metricsRecord.incrMetric("maps_launched", numMapTasksLaunched);
+ metricsRecord.incrMetric("maps_completed", numMapTasksCompleted);
+ metricsRecord.incrMetric("reduces_launched", numReduceTasksLaunched);
+ metricsRecord.incrMetric("reduces_completed", numReduceTasksCompleted);
+ metricsRecord.incrMetric("waiting_maps", numWaitingMaps);
+ metricsRecord.incrMetric("waiting_reduces", numWaitingReduces);
+
+ numMapTasksLaunched = 0;
+ numMapTasksCompleted = 0;
+ numReduceTasksLaunched = 0;
+ numReduceTasksCompleted = 0;
+ numWaitingMaps = 0;
+ numWaitingReduces = 0;
+ }
+ metricsRecord.update();
+ }
+
+ public synchronized void launchMap(TaskAttemptID taskAttemptID) {
+ ++numMapTasksLaunched;
+ decWaitingMaps(taskAttemptID.getJobID(), 1);
+ }
+
+ public synchronized void completeMap(TaskAttemptID taskAttemptID) {
+ ++numMapTasksCompleted;
+ }
+
+ public synchronized void launchReduce(TaskAttemptID taskAttemptID) {
+ ++numReduceTasksLaunched;
+ decWaitingReduces(taskAttemptID.getJobID(), 1);
+ }
+
+ public synchronized void completeReduce(TaskAttemptID taskAttemptID) {
+ ++numReduceTasksCompleted;
+ }
+
+ private synchronized void decWaitingMaps(JobID id, int task) {
+ numWaitingMaps -= task;
+ }
+
+ private synchronized void decWaitingReduces(JobID id, int task){
+ numWaitingReduces -= task;
+ }
+
+}
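
A rough usage sketch of the class above, as the local job runner might drive it; the attempt id and call sites are assumptions for illustration, not taken from this patch:

    // Same package as LocalJobRunnerMetrics (the class is package-private).
    JobConf conf = new JobConf();
    LocalJobRunnerMetrics metrics = new LocalJobRunnerMetrics(conf);

    TaskAttemptID attempt =
        TaskAttemptID.forName("attempt_201101010000_0001_m_000000_0"); // hypothetical id
    metrics.launchMap(attempt);    // maps_launched++, waiting_maps--
    // ... run the map task ...
    metrics.completeMap(attempt);  // maps_completed++
    // doUpdates() is invoked periodically by the metrics context; it folds the
    // accumulated counts into the "jobtracker" record and resets them to zero.
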
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
index be6e6d9f20e..a9382130735 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/TypeConverter.java
@@ -380,6 +380,7 @@ public class TypeConverter {
public static JobStatus.State fromYarn(YarnApplicationState state) {
switch (state) {
+ case NEW:
case SUBMITTED:
return State.PREP;
case RUNNING:
@@ -425,6 +426,11 @@ public class TypeConverter {
jobStatus.setSchedulingInfo(trackingUrl); // Set AM tracking url
jobStatus.setStartTime(application.getStartTime());
jobStatus.setFailureInfo(application.getDiagnostics());
+ jobStatus.setNeededMem(application.getApplicationResourceUsageReport().getNeededResources().getMemory());
+ jobStatus.setNumReservedSlots(application.getApplicationResourceUsageReport().getNumReservedContainers());
+ jobStatus.setNumUsedSlots(application.getApplicationResourceUsageReport().getNumUsedContainers());
+ jobStatus.setReservedMem(application.getApplicationResourceUsageReport().getReservedResources().getMemory());
+ jobStatus.setUsedMem(application.getApplicationResourceUsageReport().getUsedResources().getMemory());
return jobStatus;
}
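
The first hunk above makes a brand-new YARN application report the same client-side state as a submitted one; a one-line illustration (assuming the usual YarnApplicationState and JobStatus imports):

    // Sketch, not part of the patch: both states now map to PREP.
    JobStatus.State s1 = TypeConverter.fromYarn(YarnApplicationState.NEW);       // State.PREP
    JobStatus.State s2 = TypeConverter.fromYarn(YarnApplicationState.SUBMITTED); // State.PREP
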
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/package-info.java
new file mode 100644
index 00000000000..8d414d6b2e0
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/client/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.api.impl.pb.client;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/package-info.java
new file mode 100644
index 00000000000..cf93982eb95
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/impl/pb/service/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.api.impl.pb.service;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/package-info.java
new file mode 100644
index 00000000000..f31655223f0
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.api;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/package-info.java
new file mode 100644
index 00000000000..9fafb4acdab
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/impl/pb/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords.impl.pb;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/package-info.java
new file mode 100644
index 00000000000..cb534304a57
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/protocolrecords/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.api.protocolrecords;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/FakeDynamicScheduler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/AMInfo.java
similarity index 52%
rename from hadoop-mapreduce-project/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/FakeDynamicScheduler.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/AMInfo.java
index c596ebcc40d..1cd14ff312c 100644
--- a/hadoop-mapreduce-project/src/contrib/dynamic-scheduler/src/test/org/apache/hadoop/mapred/FakeDynamicScheduler.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/AMInfo.java
@@ -16,30 +16,23 @@
* limitations under the License.
*/
-package org.apache.hadoop.mapred;
+package org.apache.hadoop.mapreduce.v2.api.records;
-import java.io.IOException;
-import java.util.List;
-import java.util.Collection;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
-import org.apache.hadoop.mapreduce.server.jobtracker.TaskTracker;
-
-/**
- * Mock queue scheduler for testing only
- */
-public class FakeDynamicScheduler extends QueueTaskScheduler {
- public void start() throws IOException {
- }
- public void terminate() throws IOException {
- }
- public List<Task> assignTasks(TaskTracker taskTracker)
- throws IOException {
- return null;
- }
- public Collection<JobInProgress> getJobs(String queueName) {
- return null;
- }
- public void setAllocator(QueueAllocator allocator) {
- }
-}
+public interface AMInfo {
+ public ApplicationAttemptId getAppAttemptId();
+ public long getStartTime();
+ public ContainerId getContainerId();
+ public String getNodeManagerHost();
+ public int getNodeManagerPort();
+ public int getNodeManagerHttpPort();
+ public void setAppAttemptId(ApplicationAttemptId appAttemptId);
+ public void setStartTime(long startTime);
+ public void setContainerId(ContainerId containerId);
+ public void setNodeManagerHost(String nmHost);
+ public void setNodeManagerPort(int nmPort);
+ public void setNodeManagerHttpPort(int nmHttpPort);
+}
\ No newline at end of file

diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
index 87b77b7f80c..469c425febf 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/JobReport.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.mapreduce.v2.api.records;
+import java.util.List;
+
public interface JobReport {
public abstract JobId getJobId();
public abstract JobState getJobState();
@@ -25,6 +27,7 @@ public interface JobReport {
public abstract float getReduceProgress();
public abstract float getCleanupProgress();
public abstract float getSetupProgress();
+ public abstract long getSubmitTime();
public abstract long getStartTime();
public abstract long getFinishTime();
public abstract String getUser();
@@ -32,6 +35,7 @@ public interface JobReport {
public abstract String getTrackingUrl();
public abstract String getDiagnostics();
public abstract String getJobFile();
+ public abstract List<AMInfo> getAMInfos();
public abstract void setJobId(JobId jobId);
public abstract void setJobState(JobState jobState);
@@ -39,6 +43,7 @@ public interface JobReport {
public abstract void setReduceProgress(float progress);
public abstract void setCleanupProgress(float progress);
public abstract void setSetupProgress(float progress);
+ public abstract void setSubmitTime(long submitTime);
public abstract void setStartTime(long startTime);
public abstract void setFinishTime(long finishTime);
public abstract void setUser(String user);
@@ -46,4 +51,5 @@ public interface JobReport {
public abstract void setTrackingUrl(String trackingUrl);
public abstract void setDiagnostics(String diagnostics);
public abstract void setJobFile(String jobFile);
+ public abstract void setAMInfos(List<AMInfo> amInfos);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
index 4617258f32f..bc0a4c6b4f6 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/TaskAttemptReport.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.mapreduce.v2.api.records;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+
public interface TaskAttemptReport {
public abstract TaskAttemptId getTaskAttemptId();
public abstract TaskAttemptState getTaskAttemptState();
@@ -32,6 +34,10 @@ public interface TaskAttemptReport {
public abstract String getDiagnosticInfo();
public abstract String getStateString();
public abstract Phase getPhase();
+ public abstract String getNodeManagerHost();
+ public abstract int getNodeManagerPort();
+ public abstract int getNodeManagerHttpPort();
+ public abstract ContainerId getContainerId();
public abstract void setTaskAttemptId(TaskAttemptId taskAttemptId);
public abstract void setTaskAttemptState(TaskAttemptState taskAttemptState);
@@ -42,6 +48,10 @@ public interface TaskAttemptReport {
public abstract void setDiagnosticInfo(String diagnosticInfo);
public abstract void setStateString(String stateString);
public abstract void setPhase(Phase phase);
+ public abstract void setNodeManagerHost(String nmHost);
+ public abstract void setNodeManagerPort(int nmPort);
+ public abstract void setNodeManagerHttpPort(int nmHttpPort);
+ public abstract void setContainerId(ContainerId containerId);
/**
* Set the shuffle finish time. Applicable only for reduce attempts
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/AMInfoPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/AMInfoPBImpl.java
new file mode 100644
index 00000000000..325d9a88615
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/AMInfoPBImpl.java
@@ -0,0 +1,201 @@
+/**
+* Licensed to the Apache Software Foundation (ASF) under one
+* or more contributor license agreements. See the NOTICE file
+* distributed with this work for additional information
+* regarding copyright ownership. The ASF licenses this file
+* to you under the Apache License, Version 2.0 (the
+* "License"); you may not use this file except in compliance
+* with the License. You may obtain a copy of the License at
+*
+* http://www.apache.org/licenses/LICENSE-2.0
+*
+* Unless required by applicable law or agreed to in writing, software
+* distributed under the License is distributed on an "AS IS" BASIS,
+* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+* See the License for the specific language governing permissions and
+* limitations under the License.
+*/
+
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.AMInfoProto;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.AMInfoProtoOrBuilder;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationAttemptIdPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ApplicationAttemptIdProto;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
+
+public class AMInfoPBImpl extends ProtoBase<AMInfoProto> implements AMInfo {
+
+ AMInfoProto proto = AMInfoProto.getDefaultInstance();
+ AMInfoProto.Builder builder = null;
+ boolean viaProto = false;
+
+ private ApplicationAttemptId appAttemptId;
+ private ContainerId containerId;
+
+ public AMInfoPBImpl() {
+ builder = AMInfoProto.newBuilder();
+ }
+
+ public AMInfoPBImpl(AMInfoProto proto) {
+ this.proto = proto;
+ viaProto = true;
+ }
+
+ public synchronized AMInfoProto getProto() {
+ mergeLocalToProto();
+ proto = viaProto ? proto : builder.build();
+ viaProto = true;
+ return proto;
+ }
+
+ private synchronized void mergeLocalToBuilder() {
+ if (this.appAttemptId != null
+ && !((ApplicationAttemptIdPBImpl) this.appAttemptId).getProto().equals(
+ builder.getApplicationAttemptId())) {
+ builder.setApplicationAttemptId(convertToProtoFormat(this.appAttemptId));
+ }
+ if (this.getContainerId() != null
+ && !((ContainerIdPBImpl) this.containerId).getProto().equals(
+ builder.getContainerId())) {
+ builder.setContainerId(convertToProtoFormat(this.containerId));
+ }
+ }
+
+ private synchronized void mergeLocalToProto() {
+ if (viaProto)
+ maybeInitBuilder();
+ mergeLocalToBuilder();
+ proto = builder.build();
+ viaProto = true;
+ }
+
+ private synchronized void maybeInitBuilder() {
+ if (viaProto || builder == null) {
+ builder = AMInfoProto.newBuilder(proto);
+ }
+ viaProto = false;
+ }
+
+ @Override
+ public synchronized ApplicationAttemptId getAppAttemptId() {
+ AMInfoProtoOrBuilder p = viaProto ? proto : builder;
+ if (appAttemptId != null) {
+ return appAttemptId;
+ } // Else via proto
+ if (!p.hasApplicationAttemptId()) {
+ return null;
+ }
+ appAttemptId = convertFromProtoFormat(p.getApplicationAttemptId());
+ return appAttemptId;
+ }
+
+ @Override
+ public synchronized void setAppAttemptId(ApplicationAttemptId appAttemptId) {
+ maybeInitBuilder();
+ if (appAttemptId == null) {
+ builder.clearApplicationAttemptId();
+ }
+ this.appAttemptId = appAttemptId;
+ }
+
+ @Override
+ public synchronized long getStartTime() {
+ AMInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getStartTime());
+ }
+
+ @Override
+ public synchronized void setStartTime(long startTime) {
+ maybeInitBuilder();
+ builder.setStartTime(startTime);
+ }
+
+ @Override
+ public synchronized ContainerId getContainerId() {
+ AMInfoProtoOrBuilder p = viaProto ? proto : builder;
+ if (containerId != null) {
+ return containerId;
+ } // Else via proto
+ if (!p.hasContainerId()) {
+ return null;
+ }
+ containerId = convertFromProtoFormat(p.getContainerId());
+ return containerId;
+ }
+
+ @Override
+ public synchronized void setContainerId(ContainerId containerId) {
+ maybeInitBuilder();
+ if (containerId == null) {
+ builder.clearContainerId();
+ }
+ this.containerId = containerId;
+ }
+
+ @Override
+ public synchronized String getNodeManagerHost() {
+ AMInfoProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasNodeManagerHost()) {
+ return null;
+ }
+ return p.getNodeManagerHost();
+ }
+
+ @Override
+ public synchronized void setNodeManagerHost(String nmHost) {
+ maybeInitBuilder();
+ if (nmHost == null) {
+ builder.clearNodeManagerHost();
+ return;
+ }
+ builder.setNodeManagerHost(nmHost);
+ }
+
+ @Override
+ public synchronized int getNodeManagerPort() {
+ AMInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getNodeManagerPort());
+ }
+
+ @Override
+ public synchronized void setNodeManagerPort(int nmPort) {
+ maybeInitBuilder();
+ builder.setNodeManagerPort(nmPort);
+ }
+
+ @Override
+ public synchronized int getNodeManagerHttpPort() {
+ AMInfoProtoOrBuilder p = viaProto ? proto : builder;
+ return p.getNodeManagerHttpPort();
+ }
+
+ @Override
+ public synchronized void setNodeManagerHttpPort(int httpPort) {
+ maybeInitBuilder();
+ builder.setNodeManagerHttpPort(httpPort);
+ }
+
+ private ApplicationAttemptIdPBImpl convertFromProtoFormat(
+ ApplicationAttemptIdProto p) {
+ return new ApplicationAttemptIdPBImpl(p);
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
+ private
+ ApplicationAttemptIdProto convertToProtoFormat(ApplicationAttemptId t) {
+ return ((ApplicationAttemptIdPBImpl) t).getProto();
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl) t).getProto();
+ }
+}
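
AMInfoPBImpl follows the same lazy proto/builder convention as the other PB records here: reads are served from the proto (or builder) until a field is set locally, and getProto() merges local objects back before returning. A small sketch with made-up field values (not part of the patch):

    AMInfoProto proto = AMInfoProto.newBuilder()
        .setStartTime(1234L)                       // hypothetical values
        .setNodeManagerPort(8041)
        .build();
    AMInfo info = new AMInfoPBImpl(proto);
    long start = info.getStartTime();              // served straight from the proto
    info.setNodeManagerHost("nm.example.com");     // maybeInitBuilder() copies the proto, edits the builder
    AMInfoProto merged = ((AMInfoPBImpl) info).getProto();  // local state merged back in
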
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
index 2af50b6820c..41e46c33915 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/JobReportPBImpl.java
@@ -19,9 +19,14 @@
package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.proto.MRProtos.AMInfoProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobIdProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.JobReportProtoOrBuilder;
@@ -31,12 +36,14 @@ import org.apache.hadoop.yarn.api.records.ProtoBase;
-public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobReport {
+public class JobReportPBImpl extends ProtoBase<JobReportProto> implements
+ JobReport {
JobReportProto proto = JobReportProto.getDefaultInstance();
JobReportProto.Builder builder = null;
boolean viaProto = false;
private JobId jobId = null;
+ private List<AMInfo> amInfos = null;
public JobReportPBImpl() {
@@ -48,20 +55,23 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
viaProto = true;
}
- public JobReportProto getProto() {
+ public synchronized JobReportProto getProto() {
mergeLocalToProto();
proto = viaProto ? proto : builder.build();
viaProto = true;
return proto;
}
- private void mergeLocalToBuilder() {
+ private synchronized void mergeLocalToBuilder() {
if (this.jobId != null) {
builder.setJobId(convertToProtoFormat(this.jobId));
}
+ if (this.amInfos != null) {
+ addAMInfosToProto();
+ }
}
- private void mergeLocalToProto() {
+ private synchronized void mergeLocalToProto() {
if (viaProto)
maybeInitBuilder();
mergeLocalToBuilder();
@@ -69,7 +79,7 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
viaProto = true;
}
- private void maybeInitBuilder() {
+ private synchronized void maybeInitBuilder() {
if (viaProto || builder == null) {
builder = JobReportProto.newBuilder(proto);
}
@@ -78,7 +88,7 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
@Override
- public JobId getJobId() {
+ public synchronized JobId getJobId() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
if (this.jobId != null) {
return this.jobId;
@@ -91,14 +101,14 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
}
@Override
- public void setJobId(JobId jobId) {
+ public synchronized void setJobId(JobId jobId) {
maybeInitBuilder();
if (jobId == null)
builder.clearJobId();
this.jobId = jobId;
}
@Override
- public JobState getJobState() {
+ public synchronized JobState getJobState() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
if (!p.hasJobState()) {
return null;
@@ -107,7 +117,7 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
}
@Override
- public void setJobState(JobState jobState) {
+ public synchronized void setJobState(JobState jobState) {
maybeInitBuilder();
if (jobState == null) {
builder.clearJobState();
@@ -116,132 +126,197 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
builder.setJobState(convertToProtoFormat(jobState));
}
@Override
- public float getMapProgress() {
+ public synchronized float getMapProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getMapProgress());
}
@Override
- public void setMapProgress(float mapProgress) {
+ public synchronized void setMapProgress(float mapProgress) {
maybeInitBuilder();
builder.setMapProgress((mapProgress));
}
@Override
- public float getReduceProgress() {
+ public synchronized float getReduceProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getReduceProgress());
}
@Override
- public void setReduceProgress(float reduceProgress) {
+ public synchronized void setReduceProgress(float reduceProgress) {
maybeInitBuilder();
builder.setReduceProgress((reduceProgress));
}
@Override
- public float getCleanupProgress() {
+ public synchronized float getCleanupProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getCleanupProgress());
}
@Override
- public void setCleanupProgress(float cleanupProgress) {
+ public synchronized void setCleanupProgress(float cleanupProgress) {
maybeInitBuilder();
builder.setCleanupProgress((cleanupProgress));
}
@Override
- public float getSetupProgress() {
+ public synchronized float getSetupProgress() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getSetupProgress());
}
@Override
- public void setSetupProgress(float setupProgress) {
+ public synchronized void setSetupProgress(float setupProgress) {
maybeInitBuilder();
builder.setSetupProgress((setupProgress));
}
+
@Override
- public long getStartTime() {
+ public synchronized long getSubmitTime() {
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getSubmitTime());
+ }
+
+ @Override
+ public synchronized void setSubmitTime(long submitTime) {
+ maybeInitBuilder();
+ builder.setSubmitTime((submitTime));
+ }
+
+ @Override
+ public synchronized long getStartTime() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getStartTime());
}
@Override
- public void setStartTime(long startTime) {
+ public synchronized void setStartTime(long startTime) {
maybeInitBuilder();
builder.setStartTime((startTime));
}
@Override
- public long getFinishTime() {
+ public synchronized long getFinishTime() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getFinishTime());
}
@Override
- public void setFinishTime(long finishTime) {
+ public synchronized void setFinishTime(long finishTime) {
maybeInitBuilder();
builder.setFinishTime((finishTime));
}
@Override
- public String getUser() {
+ public synchronized String getUser() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getUser());
}
@Override
- public void setUser(String user) {
+ public synchronized void setUser(String user) {
maybeInitBuilder();
builder.setUser((user));
}
@Override
- public String getJobName() {
+ public synchronized String getJobName() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getJobName());
}
@Override
- public void setJobName(String jobName) {
+ public synchronized void setJobName(String jobName) {
maybeInitBuilder();
builder.setJobName((jobName));
}
@Override
- public String getTrackingUrl() {
+ public synchronized String getTrackingUrl() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return (p.getTrackingUrl());
}
@Override
- public void setTrackingUrl(String trackingUrl) {
+ public synchronized void setTrackingUrl(String trackingUrl) {
maybeInitBuilder();
builder.setTrackingUrl(trackingUrl);
}
@Override
- public String getDiagnostics() {
+ public synchronized String getDiagnostics() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getDiagnostics();
}
@Override
- public void setDiagnostics(String diagnostics) {
+ public synchronized void setDiagnostics(String diagnostics) {
maybeInitBuilder();
builder.setDiagnostics(diagnostics);
}
@Override
- public String getJobFile() {
+ public synchronized String getJobFile() {
JobReportProtoOrBuilder p = viaProto ? proto : builder;
return p.getJobFile();
}
@Override
- public void setJobFile(String jobFile) {
+ public synchronized void setJobFile(String jobFile) {
maybeInitBuilder();
builder.setJobFile(jobFile);
}
+ @Override
+ public synchronized List<AMInfo> getAMInfos() {
+ initAMInfos();
+ return this.amInfos;
+ }
+
+ @Override
+ public synchronized void setAMInfos(List<AMInfo> amInfos) {
+ maybeInitBuilder();
+ if (amInfos == null) {
+ this.builder.clearAmInfos();
+ this.amInfos = null;
+ return;
+ }
+ initAMInfos();
+ this.amInfos.clear();
+ this.amInfos.addAll(amInfos);
+ }
+
+
+ private synchronized void initAMInfos() {
+ if (this.amInfos != null) {
+ return;
+ }
+ JobReportProtoOrBuilder p = viaProto ? proto : builder;
+ List<AMInfoProto> list = p.getAmInfosList();
+
+ this.amInfos = new ArrayList<AMInfo>();
+
+ for (AMInfoProto amInfoProto : list) {
+ this.amInfos.add(convertFromProtoFormat(amInfoProto));
+ }
+ }
+
+ private synchronized void addAMInfosToProto() {
+ maybeInitBuilder();
+ builder.clearAmInfos();
+ if (this.amInfos == null)
+ return;
+ for (AMInfo amInfo : this.amInfos) {
+ builder.addAmInfos(convertToProtoFormat(amInfo));
+ }
+ }
+
+ private AMInfoPBImpl convertFromProtoFormat(AMInfoProto p) {
+ return new AMInfoPBImpl(p);
+ }
+
+ private AMInfoProto convertToProtoFormat(AMInfo t) {
+ return ((AMInfoPBImpl)t).getProto();
+ }
+
private JobIdPBImpl convertFromProtoFormat(JobIdProto p) {
return new JobIdPBImpl(p);
}
@@ -257,7 +332,4 @@ public class JobReportPBImpl extends ProtoBase<JobReportProto> implements JobRep
private JobState convertFromProtoFormat(JobStateProto e) {
return MRProtoUtils.convertFromProtoFormat(e);
}
-
-
-
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
index c52bf5a3c24..999d7702920 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/TaskAttemptReportPBImpl.java
@@ -31,7 +31,10 @@ import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProto;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptReportProtoOrBuilder;
import org.apache.hadoop.mapreduce.v2.proto.MRProtos.TaskAttemptStateProto;
import org.apache.hadoop.mapreduce.v2.util.MRProtoUtils;
+import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ProtoBase;
+import org.apache.hadoop.yarn.api.records.impl.pb.ContainerIdPBImpl;
+import org.apache.hadoop.yarn.proto.YarnProtos.ContainerIdProto;
@@ -42,6 +45,7 @@ public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> i
private TaskAttemptId taskAttemptId = null;
private Counters counters = null;
+ private ContainerId containerId = null;
public TaskAttemptReportPBImpl() {
@@ -67,6 +71,9 @@ public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> i
if (this.counters != null) {
builder.setCounters(convertToProtoFormat(this.counters));
}
+ if (this.containerId != null) {
+ builder.setContainerId(convertToProtoFormat(this.containerId));
+ }
}
private void mergeLocalToProto() {
@@ -255,7 +262,80 @@ public class TaskAttemptReportPBImpl extends ProtoBase<TaskAttemptReportProto> i
}
builder.setPhase(convertToProtoFormat(phase));
}
+
+ @Override
+ public String getNodeManagerHost() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (!p.hasNodeManagerHost()) {
+ return null;
+ }
+ return p.getNodeManagerHost();
+ }
+
+ @Override
+ public void setNodeManagerHost(String nmHost) {
+ maybeInitBuilder();
+ if (nmHost == null) {
+ builder.clearNodeManagerHost();
+ return;
+ }
+ builder.setNodeManagerHost(nmHost);
+ }
+
+ @Override
+ public int getNodeManagerPort() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getNodeManagerPort());
+ }
+
+ @Override
+ public void setNodeManagerPort(int nmPort) {
+ maybeInitBuilder();
+ builder.setNodeManagerPort(nmPort);
+ }
+
+ @Override
+ public int getNodeManagerHttpPort() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ return (p.getNodeManagerHttpPort());
+ }
+
+ @Override
+ public void setNodeManagerHttpPort(int nmHttpPort) {
+ maybeInitBuilder();
+ builder.setNodeManagerHttpPort(nmHttpPort);
+ }
+
+ @Override
+ public ContainerId getContainerId() {
+ TaskAttemptReportProtoOrBuilder p = viaProto ? proto : builder;
+ if (containerId != null) {
+ return containerId;
+ } // Else via proto
+ if (!p.hasContainerId()) {
+ return null;
+ }
+ containerId = convertFromProtoFormat(p.getContainerId());
+ return containerId;
+ }
+ @Override
+ public void setContainerId(ContainerId containerId) {
+ maybeInitBuilder();
+ if (containerId == null) {
+ builder.clearContainerId();
+ }
+ this.containerId = containerId;
+ }
+
+ private ContainerIdProto convertToProtoFormat(ContainerId t) {
+ return ((ContainerIdPBImpl)t).getProto();
+ }
+
+ private ContainerIdPBImpl convertFromProtoFormat(ContainerIdProto p) {
+ return new ContainerIdPBImpl(p);
+ }
+
private CountersPBImpl convertFromProtoFormat(CountersProto p) {
return new CountersPBImpl(p);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/package-info.java
new file mode 100644
index 00000000000..7743d5a49c4
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/impl/pb/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.api.records.impl.pb;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/package-info.java
new file mode 100644
index 00000000000..a4cf2bf4fa3
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/api/records/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.api.records;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
index 5eaf0e3ee76..f22d51c7c62 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/FileNameIndexUtils.java
@@ -29,11 +29,11 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobId;
public class FileNameIndexUtils {
- static final String UNDERSCORE_ESCAPE = "%5F";
static final int JOB_NAME_TRIM_LENGTH = 50;
- //This has to be underscore currently. Untill escape uses DELIMITER.
- static final String DELIMITER = "_";
+ // Sanitize job history file for predictable parsing
+ static final String DELIMITER = "-";
+ static final String DELIMITER_ESCAPE = "%2D";
private static final int JOB_ID_INDEX = 0;
private static final int SUBMIT_TIME_INDEX = 1;
@@ -54,7 +54,7 @@ public class FileNameIndexUtils {
public static String getDoneFileName(JobIndexInfo indexInfo) throws IOException {
StringBuilder sb = new StringBuilder();
//JobId
- sb.append(escapeUnderscores(TypeConverter.fromYarn(indexInfo.getJobId()).toString()));
+ sb.append(escapeDelimiters(TypeConverter.fromYarn(indexInfo.getJobId()).toString()));
sb.append(DELIMITER);
//StartTime
@@ -62,11 +62,11 @@ public class FileNameIndexUtils {
sb.append(DELIMITER);
//UserName
- sb.append(escapeUnderscores(getUserName(indexInfo)));
+ sb.append(escapeDelimiters(getUserName(indexInfo)));
sb.append(DELIMITER);
//JobName
- sb.append(escapeUnderscores(trimJobName(getJobName(indexInfo))));
+ sb.append(escapeDelimiters(trimJobName(getJobName(indexInfo))));
sb.append(DELIMITER);
//FinishTime
@@ -136,13 +136,13 @@ public class FileNameIndexUtils {
*/
public static String encodeJobHistoryFileName(String logFileName)
throws IOException {
- String replacementUnderscoreEscape = null;
+ String replacementDelimiterEscape = null;
- if (logFileName.contains(UNDERSCORE_ESCAPE)) {
- replacementUnderscoreEscape = nonOccursString(logFileName);
+ // Temporarily protect the escape delimiters from encoding
+ if (logFileName.contains(DELIMITER_ESCAPE)) {
+ replacementDelimiterEscape = nonOccursString(logFileName);
- logFileName = replaceStringInstances
- (logFileName, UNDERSCORE_ESCAPE, replacementUnderscoreEscape);
+ logFileName = logFileName.replaceAll(DELIMITER_ESCAPE, replacementDelimiterEscape);
}
String encodedFileName = null;
@@ -154,10 +154,10 @@ public class FileNameIndexUtils {
ioe.setStackTrace(uee.getStackTrace());
throw ioe;
}
-
- if (replacementUnderscoreEscape != null) {
- encodedFileName = replaceStringInstances
- (encodedFileName, replacementUnderscoreEscape, UNDERSCORE_ESCAPE);
+
+ // Restore protected escape delimiters after encoding
+ if (replacementDelimiterEscape != null) {
+ encodedFileName = encodedFileName.replaceAll(replacementDelimiterEscape, DELIMITER_ESCAPE);
}
return encodedFileName;
@@ -214,29 +214,10 @@ public class FileNameIndexUtils {
return in;
}
- private static String escapeUnderscores(String escapee) {
- return replaceStringInstances(escapee, "_", UNDERSCORE_ESCAPE);
+ private static String escapeDelimiters(String escapee) {
+ return escapee.replaceAll(DELIMITER, DELIMITER_ESCAPE);
}
-
- // I tolerate this code because I expect a low number of
- // occurrences in a relatively short string
- private static String replaceStringInstances
- (String logFileName, String old, String replacement) {
- int index = logFileName.indexOf(old);
- while (index > 0) {
- logFileName = (logFileName.substring(0, index)
- + replacement
- + replaceStringInstances
- (logFileName.substring(index + old.length()),
- old, replacement));
-
- index = logFileName.indexOf(old);
- }
-
- return logFileName;
- }
-
/**
* Trims the job-name if required
*/
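
The index-file delimiter changes from underscore to "-", with "%2D" as its escape, so a "-" inside any field survives a round trip through the done-file name. A short sketch of the escaping step, with a made-up job name (not part of the patch):

    String jobName = "word-count";                    // hypothetical job name
    String escaped = jobName.replaceAll("-", "%2D");  // what escapeDelimiters() does
    // escaped == "word%2Dcount"; getDoneFileName() then joins the escaped
    // index fields with "-" to build the history file name.
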
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
index a726a005b59..cb529243d12 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JHAdminConfig.java
@@ -33,7 +33,9 @@ public class JHAdminConfig {
/** host:port address for History Server API.*/
public static final String MR_HISTORY_ADDRESS = MR_HISTORY_PREFIX + "address";
- public static final String DEFAULT_MR_HISTORY_ADDRESS = "0.0.0.0:10020";
+ public static final int DEFAULT_MR_HISTORY_PORT = 10020;
+ public static final String DEFAULT_MR_HISTORY_ADDRESS = "0.0.0.0:" +
+ DEFAULT_MR_HISTORY_PORT;
/** If history cleaning should be enabled or not.*/
public static final String MR_HISTORY_CLEANER_ENABLE =
@@ -106,6 +108,7 @@ public class JHAdminConfig {
/**The address the history server webapp is on.*/
public static final String MR_HISTORY_WEBAPP_ADDRESS =
MR_HISTORY_PREFIX + "webapp.address";
+ public static final int DEFAULT_MR_HISTORY_WEBAPP_PORT = 19888;
public static final String DEFAULT_MR_HISTORY_WEBAPP_ADDRESS =
- "0.0.0.0:19888";
+ "0.0.0.0:" + DEFAULT_MR_HISTORY_WEBAPP_PORT;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
index e57cf8d3c63..711dd18118b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/JobHistoryUtils.java
@@ -480,7 +480,9 @@ public class JobHistoryUtils {
//construct the history url for job
String hsAddress = conf.get(JHAdminConfig.MR_HISTORY_WEBAPP_ADDRESS,
JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
- InetSocketAddress address = NetUtils.createSocketAddr(hsAddress);
+ InetSocketAddress address = NetUtils.createSocketAddr(
+ hsAddress, JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_PORT,
+ JHAdminConfig.DEFAULT_MR_HISTORY_WEBAPP_ADDRESS);
StringBuffer sb = new StringBuffer();
if (address.getAddress().isAnyLocalAddress() ||
address.getAddress().isLoopbackAddress()) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/package-info.java
new file mode 100644
index 00000000000..6fee88fa3c7
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/jobhistory/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.jobhistory;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/package-info.java
new file mode 100644
index 00000000000..3101a849032
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/client/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.security.client;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
index 9094da39ba3..11eca7ea3d7 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRApps.java
@@ -44,6 +44,7 @@ import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.yarn.ContainerLogAppender;
import org.apache.hadoop.yarn.YarnException;
import org.apache.hadoop.yarn.api.ApplicationConstants.Environment;
import org.apache.hadoop.yarn.api.ApplicationConstants;
@@ -182,17 +183,18 @@ public class MRApps extends Apps {
reader = new BufferedReader(new InputStreamReader(classpathFileStream));
String cp = reader.readLine();
if (cp != null) {
- addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
+ Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), cp.trim());
}
// Put the file itself on classpath for tasks.
- addToEnvironment(
+ Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
- thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile());
+ thisClassLoader.getResource(mrAppGeneratedClasspathFile).getFile()
+ .split("!")[0]);
// Add standard Hadoop classes
for (String c : ApplicationConstants.APPLICATION_CLASSPATH) {
- addToEnvironment(environment, Environment.CLASSPATH.name(), c);
+ Apps.addToEnvironment(environment, Environment.CLASSPATH.name(), c);
}
} finally {
if (classpathFileStream != null) {
@@ -205,28 +207,13 @@ public class MRApps extends Apps {
// TODO: Remove duplicates.
}
- private static final String SYSTEM_PATH_SEPARATOR =
- System.getProperty("path.separator");
-
- public static void addToEnvironment(
- Map environment,
- String variable, String value) {
- String val = environment.get(variable);
- if (val == null) {
- val = value;
- } else {
- val = val + SYSTEM_PATH_SEPARATOR + value;
- }
- environment.put(variable, val);
- }
-
public static void setClasspath(Map<String, String> environment)
throws IOException {
- MRApps.addToEnvironment(
+ Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
MRJobConfig.JOB_JAR);
- MRApps.addToEnvironment(
+ Apps.addToEnvironment(
environment,
Environment.CLASSPATH.name(),
Environment.PWD.$() + Path.SEPARATOR + "*");
@@ -355,43 +342,19 @@ public class MRApps extends Apps {
}
return result;
}
-
- public static void setEnvFromInputString(Map<String, String> env,
- String envString) {
- if (envString != null && envString.length() > 0) {
- String childEnvs[] = envString.split(",");
- for (String cEnv : childEnvs) {
- String[] parts = cEnv.split("="); // split on '='
- String value = env.get(parts[0]);
- if (value != null) {
- // Replace $env with the child's env constructed by NM's
- // For example: LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/tmp
- value = parts[1].replace("$" + parts[0], value);
- } else {
- // example PATH=$PATH:/tmp
- value = System.getenv(parts[0]);
- if (value != null) {
- // the env key is present in the tt's env
- value = parts[1].replace("$" + parts[0], value);
- } else {
- // check for simple variable substitution
- // for e.g. ROOT=$HOME
- String envValue = System.getenv(parts[1].substring(1));
- if (envValue != null) {
- value = envValue;
- } else {
- // the env key is note present anywhere .. simply set it
- // example X=$X:/tmp or X=/tmp
- value = parts[1].replace("$" + parts[0], "");
- }
- }
- }
- addToEnvironment(env, parts[0], value);
- }
- }
+ /**
+ * Add the JVM system properties necessary to configure {@link ContainerLogAppender}.
+ * @param logLevel the desired log level (e.g. INFO/WARN/DEBUG)
+ * @param logSize See {@link ContainerLogAppender#setTotalLogFileSize(long)}
+ * @param vargs the argument list to append to
+ */
+ public static void addLog4jSystemProperties(
+ String logLevel, long logSize, List<String> vargs) {
+ vargs.add("-Dlog4j.configuration=container-log4j.properties");
+ vargs.add("-D" + MRJobConfig.TASK_LOG_DIR + "=" +
+ ApplicationConstants.LOG_DIR_EXPANSION_VAR);
+ vargs.add("-D" + MRJobConfig.TASK_LOG_SIZE + "=" + logSize);
+ vargs.add("-Dhadoop.root.logger=" + logLevel + ",CLA");
}
-
-
-
}
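
The env-string plumbing moves out of MRApps (callers now go through Apps), and the single helper above appends the log4j system properties to a command line. A usage sketch, with assumed log level and size (not part of the patch):

    List<String> vargs = new ArrayList<String>();
    MRApps.addLog4jSystemProperties("INFO", 4 * 1024 * 1024, vargs);  // assumed values
    // vargs now holds, in order:
    //   -Dlog4j.configuration=container-log4j.properties
    //   -D<MRJobConfig.TASK_LOG_DIR>=<ApplicationConstants.LOG_DIR_EXPANSION_VAR>
    //   -D<MRJobConfig.TASK_LOG_SIZE>=4194304
    //   -Dhadoop.root.logger=INFO,CLA
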
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
index 543454c15a4..109028205da 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/MRBuilderUtils.java
@@ -18,13 +18,18 @@
package org.apache.hadoop.mapreduce.v2.util;
+import java.util.List;
+
+import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
import org.apache.hadoop.mapreduce.v2.api.records.JobId;
import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
import org.apache.hadoop.mapreduce.v2.api.records.JobState;
import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
import org.apache.hadoop.yarn.api.records.ApplicationId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.util.Records;
public class MRBuilderUtils {
@@ -53,14 +58,15 @@ public class MRBuilderUtils {
}
public static JobReport newJobReport(JobId jobId, String jobName,
- String userName, JobState state, long startTime, long finishTime,
+ String userName, JobState state, long submitTime, long startTime, long finishTime,
float setupProgress, float mapProgress, float reduceProgress,
- float cleanupProgress, String jobFile) {
+ float cleanupProgress, String jobFile, List<AMInfo> amInfos) {
JobReport report = Records.newRecord(JobReport.class);
report.setJobId(jobId);
report.setJobName(jobName);
report.setUser(userName);
report.setJobState(state);
+ report.setSubmitTime(submitTime);
report.setStartTime(startTime);
report.setFinishTime(finishTime);
report.setSetupProgress(setupProgress);
@@ -68,6 +74,20 @@ public class MRBuilderUtils {
report.setMapProgress(mapProgress);
report.setReduceProgress(reduceProgress);
report.setJobFile(jobFile);
+ report.setAMInfos(amInfos);
return report;
}
+
+ public static AMInfo newAMInfo(ApplicationAttemptId appAttemptId,
+ long startTime, ContainerId containerId, String nmHost, int nmPort,
+ int nmHttpPort) {
+ AMInfo amInfo = Records.newRecord(AMInfo.class);
+ amInfo.setAppAttemptId(appAttemptId);
+ amInfo.setStartTime(startTime);
+ amInfo.setContainerId(containerId);
+ amInfo.setNodeManagerHost(nmHost);
+ amInfo.setNodeManagerPort(nmPort);
+ amInfo.setNodeManagerHttpPort(nmHttpPort);
+ return amInfo;
+ }
}
\ No newline at end of file
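
A short sketch of how the widened newJobReport signature and the new newAMInfo factory fit together; the host, ports, progress values and job/user names are illustrative, and the ids are assumed to be supplied by the caller:

    import java.util.Collections;
    import org.apache.hadoop.mapreduce.v2.api.records.AMInfo;
    import org.apache.hadoop.mapreduce.v2.api.records.JobId;
    import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
    import org.apache.hadoop.mapreduce.v2.api.records.JobState;
    import org.apache.hadoop.mapreduce.v2.util.MRBuilderUtils;
    import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
    import org.apache.hadoop.yarn.api.records.ContainerId;

    public class JobReportSketch {
      static JobReport buildReport(JobId jobId, ApplicationAttemptId appAttemptId,
          ContainerId containerId, long submitTime, long startTime) {
        AMInfo amInfo = MRBuilderUtils.newAMInfo(appAttemptId, startTime,
            containerId, "nm-host.example.com", 45454, 8042);
        // submitTime now precedes startTime, and the AM attempt history is
        // carried along with the report.
        return MRBuilderUtils.newJobReport(jobId, "wordcount", "alice",
            JobState.RUNNING, submitTime, startTime, 0L,
            1.0f, 0.5f, 0.0f, 0.0f, "hdfs:///tmp/job.xml",
            Collections.singletonList(amInfo));
      }
    }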
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/package-info.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/package-info.java
new file mode 100644
index 00000000000..619085b2b63
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/util/package-info.java
@@ -0,0 +1,20 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+package org.apache.hadoop.mapreduce.v2.util;
+import org.apache.hadoop.classification.InterfaceAudience;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
index a4375c9e677..3390b7ad845 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/proto/mr_protos.proto
@@ -119,6 +119,10 @@ message TaskAttemptReportProto {
optional PhaseProto phase = 9;
optional int64 shuffle_finish_time = 10;
optional int64 sort_finish_time=11;
+ optional string node_manager_host = 12;
+ optional int32 node_manager_port = 13;
+ optional int32 node_manager_http_port = 14;
+ optional ContainerIdProto container_id = 15;
}
enum JobStateProto {
@@ -146,6 +150,17 @@ message JobReportProto {
optional string trackingUrl = 11;
optional string diagnostics = 12;
optional string jobFile = 13;
+ repeated AMInfoProto am_infos = 14;
+ optional int64 submit_time = 15;
+}
+
+message AMInfoProto {
+ optional ApplicationAttemptIdProto application_attempt_id = 1;
+ optional int64 start_time = 2;
+ optional ContainerIdProto container_id = 3;
+ optional string node_manager_host = 4;
+ optional int32 node_manager_port = 5;
+ optional int32 node_manager_http_port = 6;
}
enum TaskAttemptCompletionEventStatusProto {
diff --git a/hadoop-mapreduce-project/src/contrib/capacity-scheduler/ivy/libraries.properties b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
similarity index 65%
rename from hadoop-mapreduce-project/src/contrib/capacity-scheduler/ivy/libraries.properties
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
index 8a80dd81a99..5b8dfdcb4ef 100644
--- a/hadoop-mapreduce-project/src/contrib/capacity-scheduler/ivy/libraries.properties
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/resources/META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider
@@ -1,3 +1,4 @@
+#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
@@ -9,9 +10,5 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
-
-#This properties file lists the versions of the various artifacts used by streaming.
-#It drives ivy and the generation of a maven POM
-
-#Please list the dependencies name with version if they are different from the ones
-#listed in the global libraries.properties file (in alphabetical order)
+#
+org.apache.hadoop.mapred.LocalClientProtocolProvider
diff --git a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMRWithDistributedCache.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
similarity index 96%
rename from hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
index 8b1691a46d6..ed89bf9fd4f 100644
--- a/hadoop-mapreduce-project/src/test/mapred/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapred/TestMRWithDistributedCache.java
@@ -163,17 +163,6 @@ public class TestMRWithDistributedCache extends TestCase {
testWithConf(c);
}
- /** Tests using a full MiniMRCluster. */
- public void testMiniMRJobRunner() throws Exception {
- MiniMRCluster m = new MiniMRCluster(1, "file:///", 1);
- try {
- testWithConf(m.createJobConf());
- } finally {
- m.shutdown();
- }
-
- }
-
private Path createTempFile(String filename, String contents)
throws IOException {
Path path = new Path(TEST_ROOT_DIR, filename);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
index 43ca32020d1..9bbd070768e 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/TestTypeConverter.java
@@ -20,18 +20,49 @@ package org.apache.hadoop.mapreduce;
import junit.framework.Assert;
import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
import org.apache.hadoop.yarn.api.records.ApplicationId;
import org.apache.hadoop.yarn.api.records.YarnApplicationState;
import org.apache.hadoop.yarn.api.records.ApplicationReport;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationIdPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationReportPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ApplicationResourceUsageReportPBImpl;
import org.apache.hadoop.yarn.api.records.impl.pb.QueueInfoPBImpl;
+import org.apache.hadoop.yarn.api.records.impl.pb.ResourcePBImpl;
+import org.apache.hadoop.yarn.api.records.QueueState;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;
import org.junit.Test;
public class TestTypeConverter {
+ @Test
+ public void testEnums() throws Exception {
+ for (YarnApplicationState applicationState : YarnApplicationState.values()) {
+ TypeConverter.fromYarn(applicationState);
+ }
+
+ for (TaskType taskType : TaskType.values()) {
+ TypeConverter.fromYarn(taskType);
+ }
+
+ for (JobState jobState : JobState.values()) {
+ TypeConverter.fromYarn(jobState);
+ }
+
+ for (QueueState queueState : QueueState.values()) {
+ TypeConverter.fromYarn(queueState);
+ }
+
+ for (TaskState taskState : TaskState.values()) {
+ TypeConverter.fromYarn(taskState);
+ }
+
+
+ }
+
@Test
public void testFromYarn() throws Exception {
int appStartTime = 612354;
@@ -42,6 +73,15 @@ public class TestTypeConverter {
applicationReport.setYarnApplicationState(state);
applicationReport.setStartTime(appStartTime);
applicationReport.setUser("TestTypeConverter-user");
+ ApplicationResourceUsageReportPBImpl appUsageRpt = new ApplicationResourceUsageReportPBImpl();
+ ResourcePBImpl r = new ResourcePBImpl();
+ r.setMemory(2048);
+ appUsageRpt.setNeededResources(r);
+ appUsageRpt.setNumReservedContainers(1);
+ appUsageRpt.setNumUsedContainers(3);
+ appUsageRpt.setReservedResources(r);
+ appUsageRpt.setUsedResources(r);
+ applicationReport.setApplicationResourceUsageReport(appUsageRpt);
JobStatus jobStatus = TypeConverter.fromYarn(applicationReport, "dummy-jobfile");
Assert.assertEquals(appStartTime, jobStatus.getStartTime());
Assert.assertEquals(state.toString(), jobStatus.getState().toString());
@@ -60,6 +100,15 @@ public class TestTypeConverter {
when(mockReport.getUser()).thenReturn("dummy-user");
when(mockReport.getQueue()).thenReturn("dummy-queue");
String jobFile = "dummy-path/job.xml";
+ ApplicationResourceUsageReportPBImpl appUsageRpt = new ApplicationResourceUsageReportPBImpl();
+ ResourcePBImpl r = new ResourcePBImpl();
+ r.setMemory(2048);
+ appUsageRpt.setNeededResources(r);
+ appUsageRpt.setNumReservedContainers(1);
+ appUsageRpt.setNumUsedContainers(3);
+ appUsageRpt.setReservedResources(r);
+ appUsageRpt.setUsedResources(r);
+ when(mockReport.getApplicationResourceUsageReport()).thenReturn(appUsageRpt);
JobStatus status = TypeConverter.fromYarn(mockReport, jobFile);
Assert.assertNotNull("fromYarn returned null status", status);
Assert.assertEquals("jobFile set incorrectly", "dummy-path/job.xml", status.getJobFile());
@@ -69,6 +118,11 @@ public class TestTypeConverter {
Assert.assertEquals("schedulingInfo set incorrectly", "dummy-tracking-url", status.getSchedulingInfo());
Assert.assertEquals("jobId set incorrectly", 6789, status.getJobID().getId());
Assert.assertEquals("state set incorrectly", JobStatus.State.KILLED, status.getState());
+ Assert.assertEquals("needed mem info set incorrectly", 2048, status.getNeededMem());
+ Assert.assertEquals("num rsvd slots info set incorrectly", 1, status.getNumReservedSlots());
+ Assert.assertEquals("num used slots info set incorrectly", 3, status.getNumUsedSlots());
+ Assert.assertEquals("rsvd mem info set incorrectly", 2048, status.getReservedMem());
+ Assert.assertEquals("used mem info set incorrectly", 2048, status.getUsedMem());
}
@Test
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestFileNameIndexUtils.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestFileNameIndexUtils.java
new file mode 100644
index 00000000000..9de3dcdfaaa
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/java/org/apache/hadoop/mapreduce/v2/jobhistory/TestFileNameIndexUtils.java
@@ -0,0 +1,130 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.jobhistory;
+
+import java.io.IOException;
+
+import org.apache.hadoop.mapreduce.JobID;
+import org.apache.hadoop.mapreduce.TypeConverter;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+
+import org.junit.Assert;
+import org.junit.Test;
+
+public class TestFileNameIndexUtils {
+
+ private static final String JOB_HISTORY_FILE_FORMATTER = "%s"
+ + FileNameIndexUtils.DELIMITER + "%s"
+ + FileNameIndexUtils.DELIMITER + "%s"
+ + FileNameIndexUtils.DELIMITER + "%s"
+ + FileNameIndexUtils.DELIMITER + "%s"
+ + FileNameIndexUtils.DELIMITER + "%s"
+ + FileNameIndexUtils.DELIMITER + "%s"
+ + FileNameIndexUtils.DELIMITER + "%s"
+ + JobHistoryUtils.JOB_HISTORY_FILE_EXTENSION;
+
+ private static final String JOB_ID = "job_1317928501754_0001";
+ private static final String SUBMIT_TIME = "1317928742025";
+ private static final String USER_NAME = "username";
+ private static final String USER_NAME_WITH_DELIMITER = "user"
+ + FileNameIndexUtils.DELIMITER + "name";
+ private static final String USER_NAME_WITH_DELIMITER_ESCAPE = "user"
+ + FileNameIndexUtils.DELIMITER_ESCAPE + "name";
+ private static final String JOB_NAME = "mapreduce";
+ private static final String JOB_NAME_WITH_DELIMITER = "map"
+ + FileNameIndexUtils.DELIMITER + "reduce";
+ private static final String JOB_NAME_WITH_DELIMITER_ESCAPE = "map"
+ + FileNameIndexUtils.DELIMITER_ESCAPE + "reduce";
+ private static final String FINISH_TIME = "1317928754958";
+ private static final String NUM_MAPS = "1";
+ private static final String NUM_REDUCES = "1";
+ private static final String JOB_STATUS = "SUCCEEDED";
+
+ @Test
+ public void testUserNamePercentEncoding() throws IOException{
+ JobIndexInfo info = new JobIndexInfo();
+ JobID oldJobId = JobID.forName(JOB_ID);
+ JobId jobId = TypeConverter.toYarn(oldJobId);
+ info.setJobId(jobId);
+ info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
+ info.setUser(USER_NAME_WITH_DELIMITER);
+ info.setJobName(JOB_NAME);
+ info.setFinishTime(Long.parseLong(FINISH_TIME));
+ info.setNumMaps(Integer.parseInt(NUM_MAPS));
+ info.setNumReduces(Integer.parseInt(NUM_REDUCES));
+ info.setJobStatus(JOB_STATUS);
+
+ String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
+ Assert.assertTrue("User name not encoded correctly into job history file",
+ jobHistoryFile.contains(USER_NAME_WITH_DELIMITER_ESCAPE));
+ }
+
+ @Test
+ public void testUserNamePercentDecoding() throws IOException {
+ String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER,
+ JOB_ID,
+ SUBMIT_TIME,
+ USER_NAME_WITH_DELIMITER_ESCAPE,
+ JOB_NAME,
+ FINISH_TIME,
+ NUM_MAPS,
+ NUM_REDUCES,
+ JOB_STATUS);
+
+ JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
+ Assert.assertEquals("User name doesn't match",
+ USER_NAME_WITH_DELIMITER, info.getUser());
+ }
+
+ @Test
+ public void testJobNamePercentEncoding() throws IOException {
+ JobIndexInfo info = new JobIndexInfo();
+ JobID oldJobId = JobID.forName(JOB_ID);
+ JobId jobId = TypeConverter.toYarn(oldJobId);
+ info.setJobId(jobId);
+ info.setSubmitTime(Long.parseLong(SUBMIT_TIME));
+ info.setUser(USER_NAME);
+ info.setJobName(JOB_NAME_WITH_DELIMITER);
+ info.setFinishTime(Long.parseLong(FINISH_TIME));
+ info.setNumMaps(Integer.parseInt(NUM_MAPS));
+ info.setNumReduces(Integer.parseInt(NUM_REDUCES));
+ info.setJobStatus(JOB_STATUS);
+
+ String jobHistoryFile = FileNameIndexUtils.getDoneFileName(info);
+ Assert.assertTrue("Job name not encoded correctly into job history file",
+ jobHistoryFile.contains(JOB_NAME_WITH_DELIMITER_ESCAPE));
+ }
+
+ @Test
+ public void testJobNamePercentDecoding() throws IOException {
+ String jobHistoryFile = String.format(JOB_HISTORY_FILE_FORMATTER,
+ JOB_ID,
+ SUBMIT_TIME,
+ USER_NAME,
+ JOB_NAME_WITH_DELIMITER_ESCAPE,
+ FINISH_TIME,
+ NUM_MAPS,
+ NUM_REDUCES,
+ JOB_STATUS);
+
+ JobIndexInfo info = FileNameIndexUtils.getIndexInfo(jobHistoryFile);
+ Assert.assertEquals("Job name doesn't match",
+ JOB_NAME_WITH_DELIMITER, info.getJobName());
+ }
+}
diff --git a/hadoop-mapreduce-project/src/contrib/fairscheduler/ivy/libraries.properties b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
similarity index 64%
rename from hadoop-mapreduce-project/src/contrib/fairscheduler/ivy/libraries.properties
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
index 8a80dd81a99..531b68b5a9f 100644
--- a/hadoop-mapreduce-project/src/contrib/fairscheduler/ivy/libraries.properties
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/test/resources/log4j.properties
@@ -10,8 +10,10 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-#This properties file lists the versions of the various artifacts used by streaming.
-#It drives ivy and the generation of a maven POM
+# log4j configuration used during build and unit tests
-#Please list the dependencies name with version if they are different from the ones
-#listed in the global libraries.properties file (in alphabetical order)
+log4j.rootLogger=info,stdout
+log4j.threshhold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p [%t] %c{2} (%F:%M(%L)) - %m%n
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
index be98eb7cdf9..138e4332aae 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/pom.xml
@@ -16,23 +16,28 @@
hadoop-mapreduce-clientorg.apache.hadoop
- ${hadoop-mapreduce.version}
+ 0.24.0-SNAPSHOT4.0.0org.apache.hadoophadoop-mapreduce-client-core
+ 0.24.0-SNAPSHOThadoop-mapreduce-client-core
- ${project.artifact.file}
- ${project.parent.parent.basedir}
+
+ ${project.parent.basedir}/..
-
+
org.apache.hadoophadoop-yarn-common
+
+ org.apache.hadoop
+ hadoop-yarn-server-nodemanager
+ org.apache.hadoophadoop-hdfs
@@ -41,6 +46,15 @@
+
+ org.apache.maven.plugins
+ maven-surefire-plugin
+
+
+ file:///${project.parent.basedir}/../src/test/log4j.properties
+
+
+ org.apache.avroavro-maven-plugin
@@ -54,6 +68,24 @@
+
+ org.apache.maven.plugins
+ maven-antrun-plugin
+
+
+ pre-site
+
+ run
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
index baef951e531..ab739698e8a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/avro/Events.avpr
@@ -69,6 +69,17 @@
]
},
+ {"type": "record", "name": "AMStarted",
+ "fields": [
+ {"name": "applicationAttemptId", "type": "string"},
+ {"name": "startTime", "type": "long"},
+ {"name": "containerId", "type": "string"},
+ {"name": "nodeManagerHost", "type": "string"},
+ {"name": "nodeManagerPort", "type": "int"},
+ {"name": "nodeManagerHttpPort", "type": "int"}
+ ]
+ },
+
{"type": "record", "name": "JobSubmitted",
"fields": [
{"name": "jobid", "type": "string"},
@@ -125,6 +136,7 @@
{"name": "mapFinishTime", "type": "long"},
{"name": "finishTime", "type": "long"},
{"name": "hostname", "type": "string"},
+ {"name": "rackname", "type": "string"},
{"name": "state", "type": "string"},
{"name": "counters", "type": "JhCounters"},
{"name": "clockSplits", "type": { "type": "array", "items": "int"}},
@@ -144,6 +156,7 @@
{"name": "sortFinishTime", "type": "long"},
{"name": "finishTime", "type": "long"},
{"name": "hostname", "type": "string"},
+ {"name": "rackname", "type": "string"},
{"name": "state", "type": "string"},
{"name": "counters", "type": "JhCounters"},
{"name": "clockSplits", "type": { "type": "array", "items": "int"}},
@@ -173,7 +186,9 @@
{"name": "attemptId", "type": "string"},
{"name": "startTime", "type": "long"},
{"name": "trackerName", "type": "string"},
- {"name": "httpPort", "type": "int"}
+ {"name": "httpPort", "type": "int"},
+ {"name": "shufflePort", "type": "int"},
+ {"name": "containerId", "type": "string"}
]
},
@@ -213,7 +228,7 @@
{"name": "counters", "type": "JhCounters"}
]
},
-
+
{"type": "record", "name": "TaskStarted",
"fields": [
{"name": "taskid", "type": "string"},
@@ -244,6 +259,7 @@
"TASK_FINISHED",
"TASK_FAILED",
"TASK_UPDATED",
+ "NORMALIZED_RESOURCE",
"MAP_ATTEMPT_STARTED",
"MAP_ATTEMPT_FINISHED",
"MAP_ATTEMPT_FAILED",
@@ -259,7 +275,8 @@
"CLEANUP_ATTEMPT_STARTED",
"CLEANUP_ATTEMPT_FINISHED",
"CLEANUP_ATTEMPT_FAILED",
- "CLEANUP_ATTEMPT_KILLED"
+ "CLEANUP_ATTEMPT_KILLED",
+ "AM_STARTED"
]
},
@@ -271,6 +288,7 @@
"JobFinished",
"JobInfoChange",
"JobInited",
+ "AMStarted",
"JobPriorityChange",
"JobStatusChanged",
"JobSubmitted",
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
index a448977d7c3..32b6e2232d0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileOutputCommitter.java
@@ -38,7 +38,8 @@ public class FileOutputCommitter extends OutputCommitter {
public static final Log LOG = LogFactory.getLog(
"org.apache.hadoop.mapred.FileOutputCommitter");
-/**
+
+ /**
* Temporary directory name
*/
public static final String TEMP_DIR_NAME = "_temporary";
@@ -50,7 +51,9 @@ public class FileOutputCommitter extends OutputCommitter {
JobConf conf = context.getJobConf();
Path outputPath = FileOutputFormat.getOutputPath(conf);
if (outputPath != null) {
- Path tmpDir = new Path(outputPath, FileOutputCommitter.TEMP_DIR_NAME);
+ Path tmpDir =
+ new Path(outputPath, getJobAttemptBaseDirName(context) +
+ Path.SEPARATOR + FileOutputCommitter.TEMP_DIR_NAME);
FileSystem fileSys = tmpDir.getFileSystem(conf);
if (!fileSys.mkdirs(tmpDir)) {
LOG.error("Mkdirs failed to create " + tmpDir.toString());
@@ -65,12 +68,33 @@ public class FileOutputCommitter extends OutputCommitter {
}
public void commitJob(JobContext context) throws IOException {
- // delete the _temporary folder in the output folder
- cleanupJob(context);
- // check if the output-dir marking is required
- if (shouldMarkOutputDir(context.getJobConf())) {
- // create a _success file in the output folder
- markOutputDirSuccessful(context);
+ //delete the task temp directory from the current jobtempdir
+ JobConf conf = context.getJobConf();
+ Path outputPath = FileOutputFormat.getOutputPath(conf);
+ if (outputPath != null) {
+ FileSystem outputFileSystem = outputPath.getFileSystem(conf);
+ Path tmpDir = new Path(outputPath, getJobAttemptBaseDirName(context) +
+ Path.SEPARATOR + FileOutputCommitter.TEMP_DIR_NAME);
+ FileSystem fileSys = tmpDir.getFileSystem(context.getConfiguration());
+ if (fileSys.exists(tmpDir)) {
+ fileSys.delete(tmpDir, true);
+ } else {
+ LOG.warn("Task temp dir could not be deleted " + tmpDir);
+ }
+
+ //move the job output to final place
+ Path jobOutputPath =
+ new Path(outputPath, getJobAttemptBaseDirName(context));
+ moveJobOutputs(outputFileSystem,
+ jobOutputPath, outputPath, jobOutputPath);
+
+ // delete the _temporary folder in the output folder
+ cleanupJob(context);
+ // check if the output-dir marking is required
+ if (shouldMarkOutputDir(context.getJobConf())) {
+ // create a _success file in the output folder
+ markOutputDirSuccessful(context);
+ }
}
}
@@ -88,6 +112,39 @@ public class FileOutputCommitter extends OutputCommitter {
}
}
+ private void moveJobOutputs(FileSystem fs, final Path origJobOutputPath,
+ Path finalOutputDir, Path jobOutput) throws IOException {
+ LOG.debug("Told to move job output from " + jobOutput
+ + " to " + finalOutputDir +
+ " and orig job output path is " + origJobOutputPath);
+ if (fs.isFile(jobOutput)) {
+ Path finalOutputPath =
+ getFinalPath(fs, finalOutputDir, jobOutput, origJobOutputPath);
+ if (!fs.rename(jobOutput, finalOutputPath)) {
+ if (!fs.delete(finalOutputPath, true)) {
+ throw new IOException("Failed to delete earlier output of job");
+ }
+ if (!fs.rename(jobOutput, finalOutputPath)) {
+ throw new IOException("Failed to save output of job");
+ }
+ }
+ LOG.debug("Moved job output file from " + jobOutput + " to " +
+ finalOutputPath);
+ } else if (fs.getFileStatus(jobOutput).isDirectory()) {
+ LOG.debug("Job output file " + jobOutput + " is a dir");
+ FileStatus[] paths = fs.listStatus(jobOutput);
+ Path finalOutputPath =
+ getFinalPath(fs, finalOutputDir, jobOutput, origJobOutputPath);
+ fs.mkdirs(finalOutputPath);
+ LOG.debug("Creating dirs along job output path " + finalOutputPath);
+ if (paths != null) {
+ for (FileStatus path : paths) {
+ moveJobOutputs(fs, origJobOutputPath, finalOutputDir, path.getPath());
+ }
+ }
+ }
+ }
+
@Override
@Deprecated
public void cleanupJob(JobContext context) throws IOException {
@@ -128,9 +185,14 @@ public class FileOutputCommitter extends OutputCommitter {
FileSystem fs = taskOutputPath.getFileSystem(job);
context.getProgressible().progress();
if (fs.exists(taskOutputPath)) {
- Path jobOutputPath = taskOutputPath.getParent().getParent();
- // Move the task outputs to their final place
- moveTaskOutputs(context, fs, jobOutputPath, taskOutputPath);
+ // Move the task outputs to the current job attempt output dir
+ JobConf conf = context.getJobConf();
+ Path outputPath = FileOutputFormat.getOutputPath(conf);
+ FileSystem outputFileSystem = outputPath.getFileSystem(conf);
+ Path jobOutputPath = new Path(outputPath, getJobTempDirName(context));
+ moveTaskOutputs(context, outputFileSystem, jobOutputPath,
+ taskOutputPath);
+
// Delete the temporary task-specific output directory
if (!fs.delete(taskOutputPath, true)) {
LOG.info("Failed to delete the temporary output" +
@@ -149,8 +211,10 @@ public class FileOutputCommitter extends OutputCommitter {
throws IOException {
TaskAttemptID attemptId = context.getTaskAttemptID();
context.getProgressible().progress();
+ LOG.debug("Told to move taskoutput from " + taskOutput
+ + " to " + jobOutputDir);
if (fs.isFile(taskOutput)) {
- Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput,
+ Path finalOutputPath = getFinalPath(fs, jobOutputDir, taskOutput,
getTempTaskOutputPath(context));
if (!fs.rename(taskOutput, finalOutputPath)) {
if (!fs.delete(finalOutputPath, true)) {
@@ -164,10 +228,12 @@ public class FileOutputCommitter extends OutputCommitter {
}
LOG.debug("Moved " + taskOutput + " to " + finalOutputPath);
} else if(fs.getFileStatus(taskOutput).isDirectory()) {
+ LOG.debug("Taskoutput " + taskOutput + " is a dir");
FileStatus[] paths = fs.listStatus(taskOutput);
- Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput,
+ Path finalOutputPath = getFinalPath(fs, jobOutputDir, taskOutput,
getTempTaskOutputPath(context));
fs.mkdirs(finalOutputPath);
+ LOG.debug("Creating dirs along path " + finalOutputPath);
if (paths != null) {
for (FileStatus path : paths) {
moveTaskOutputs(context, fs, jobOutputDir, path.getPath());
@@ -185,13 +251,16 @@ public class FileOutputCommitter extends OutputCommitter {
}
}
- private Path getFinalPath(Path jobOutputDir, Path taskOutput,
+ @SuppressWarnings("deprecation")
+ private Path getFinalPath(FileSystem fs, Path jobOutputDir, Path taskOutput,
Path taskOutputPath) throws IOException {
- URI taskOutputUri = taskOutput.toUri();
- URI relativePath = taskOutputPath.toUri().relativize(taskOutputUri);
- if (taskOutputUri == relativePath) {//taskOutputPath is not a parent of taskOutput
+ URI taskOutputUri = taskOutput.makeQualified(fs).toUri();
+ URI taskOutputPathUri = taskOutputPath.makeQualified(fs).toUri();
+ URI relativePath = taskOutputPathUri.relativize(taskOutputUri);
+ if (taskOutputUri == relativePath) {
+ //taskOutputPath is not a parent of taskOutput
throw new IOException("Can not get the relative path: base = " +
- taskOutputPath + " child = " + taskOutput);
+ taskOutputPathUri + " child = " + taskOutputUri);
}
if (relativePath.getPath().length() > 0) {
return new Path(jobOutputDir, relativePath.getPath());
@@ -216,7 +285,8 @@ public class FileOutputCommitter extends OutputCommitter {
return false;
}
- Path getTempTaskOutputPath(TaskAttemptContext taskContext) throws IOException {
+ Path getTempTaskOutputPath(TaskAttemptContext taskContext)
+ throws IOException {
JobConf conf = taskContext.getJobConf();
Path outputPath = FileOutputFormat.getOutputPath(conf);
if (outputPath != null) {
@@ -247,4 +317,63 @@ public class FileOutputCommitter extends OutputCommitter {
}
return taskTmpDir;
}
+
+ @Override
+ public boolean isRecoverySupported() {
+ return true;
+ }
+
+ @Override
+ public void recoverTask(TaskAttemptContext context)
+ throws IOException {
+ Path outputPath = FileOutputFormat.getOutputPath(context.getJobConf());
+ context.progress();
+ Path jobOutputPath = new Path(outputPath, getJobTempDirName(context));
+ int previousAttempt =
+ context.getConfiguration().getInt(
+ MRConstants.APPLICATION_ATTEMPT_ID, 0) - 1;
+ if (previousAttempt < 0) {
+ LOG.warn("Cannot recover task output for first attempt...");
+ return;
+ }
+
+ FileSystem outputFileSystem =
+ outputPath.getFileSystem(context.getJobConf());
+ Path pathToRecover =
+ new Path(outputPath, getJobAttemptBaseDirName(previousAttempt));
+ if (outputFileSystem.exists(pathToRecover)) {
+ // Move the task outputs to their final place
+ LOG.debug("Trying to recover task from " + pathToRecover
+ + " into " + jobOutputPath);
+ moveJobOutputs(outputFileSystem,
+ pathToRecover, jobOutputPath, pathToRecover);
+ LOG.info("Saved output of job to " + jobOutputPath);
+ }
+ }
+
+ protected static String getJobAttemptBaseDirName(JobContext context) {
+ int appAttemptId =
+ context.getJobConf().getInt(
+ MRConstants.APPLICATION_ATTEMPT_ID, 0);
+ return getJobAttemptBaseDirName(appAttemptId);
+ }
+
+ protected static String getJobTempDirName(TaskAttemptContext context) {
+ int appAttemptId =
+ context.getJobConf().getInt(
+ MRConstants.APPLICATION_ATTEMPT_ID, 0);
+ return getJobAttemptBaseDirName(appAttemptId);
+ }
+
+ protected static String getJobAttemptBaseDirName(int appAttemptId) {
+ return FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR +
+ appAttemptId;
+ }
+
+ protected static String getTaskAttemptBaseDirName(
+ TaskAttemptContext context) {
+ return getJobTempDirName(context) + Path.SEPARATOR +
+ FileOutputCommitter.TEMP_DIR_NAME + Path.SEPARATOR +
+ "_" + context.getTaskAttemptID().toString();
+ }
}
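
The attempt-scoped directory names produced by the new helpers are the key to the recovery logic above; a small illustration of what they evaluate to (the task attempt id is made up):

    import org.apache.hadoop.fs.Path;

    public class CommitterLayoutSketch {
      public static void main(String[] args) {
        // What getJobAttemptBaseDirName(appAttemptId) yields for attempt 1
        // (TEMP_DIR_NAME is "_temporary"):
        String jobAttemptDir = "_temporary" + Path.SEPARATOR + 1;
        // What getTaskAttemptBaseDirName(context) adds for a task attempt:
        String taskAttemptDir = jobAttemptDir + Path.SEPARATOR
            + "_temporary" + Path.SEPARATOR
            + "_attempt_1317928501754_0001_m_000000_0";   // illustrative id
        System.out.println(jobAttemptDir);   // _temporary/1
        System.out.println(taskAttemptDir);  // _temporary/1/_temporary/_attempt_...
      }
    }

commitTask moves task output under the current job-attempt directory, recoverTask promotes the previous attempt's directory into the current one, and commitJob finally promotes the current attempt's directory into the job output path before cleanup.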
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
index 9382dc4a97b..0505e33ce54 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobClient.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenRenewer;
import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
@@ -459,6 +460,37 @@ public class JobClient extends CLI {
cluster = new Cluster(conf);
}
+ @InterfaceAudience.Private
+ public static class Renewer extends TokenRenewer {
+
+ @Override
+ public boolean handleKind(Text kind) {
+ return DelegationTokenIdentifier.MAPREDUCE_DELEGATION_KIND.equals(kind);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public long renew(Token<?> token, Configuration conf
+ ) throws IOException, InterruptedException {
+ return new Cluster(conf).
+ renewDelegationToken((Token<DelegationTokenIdentifier>) token);
+ }
+
+ @SuppressWarnings("unchecked")
+ @Override
+ public void cancel(Token<?> token, Configuration conf
+ ) throws IOException, InterruptedException {
+ new Cluster(conf).
+ cancelDelegationToken((Token<DelegationTokenIdentifier>) token);
+ }
+
+ @Override
+ public boolean isManaged(Token<?> token) throws IOException {
+ return true;
+ }
+
+ }
+
/**
* Build a job client, connect to the indicated job tracker.
*
@@ -1048,22 +1080,24 @@ public class JobClient extends CLI {
* @return true if the renewal went well
* @throws InvalidToken
* @throws IOException
+ * @deprecated Use {@link Token#renew} instead
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws InvalidToken, IOException,
InterruptedException {
- return cluster.renewDelegationToken(token);
+ return token.renew(getConf());
}
/**
* Cancel a delegation token from the JobTracker
* @param token the token to cancel
* @throws IOException
+ * @deprecated Use {@link Token#cancel} instead
*/
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
) throws InvalidToken, IOException,
InterruptedException {
- cluster.cancelDelegationToken(token);
+ token.cancel(getConf());
}
/**
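
A minimal migration sketch for callers of the two deprecated methods; conf and token are assumed to already be in scope:

    // Before: renewal and cancellation went through the JobClient RPC helpers.
    //   long expiry = jobClient.renewDelegationToken(token);
    //   jobClient.cancelDelegationToken(token);

    // After: the generic Token API is used directly; it dispatches to
    // JobClient.Renewer, which is registered for MAPREDUCE_DELEGATION_KIND.
    long expiry = token.renew(conf);
    token.cancel(conf);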
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
index b489d41b17c..9475681d6d9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobConf.java
@@ -1649,7 +1649,7 @@ public class JobConf extends Configuration {
* @see #setJobEndNotificationURI(String)
*/
public String getJobEndNotificationURI() {
- return get(JobContext.END_NOTIFICATION_URL);
+ return get(JobContext.MR_JOB_END_NOTIFICATION_URL);
}
/**
@@ -1669,7 +1669,7 @@ public class JobConf extends Configuration {
* JobCompletionAndChaining">Job Completion and Chaining
*/
public void setJobEndNotificationURI(String uri) {
- set(JobContext.END_NOTIFICATION_URL, uri);
+ set(JobContext.MR_JOB_END_NOTIFICATION_URL, uri);
}
/**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobEndNotifier.java
index d28e72290fd..c5fd9ca2289 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobEndNotifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobEndNotifier.java
@@ -102,8 +102,8 @@ public class JobEndNotifier {
String uri = conf.getJobEndNotificationURI();
if (uri != null) {
// +1 to make logic for first notification identical to a retry
- int retryAttempts = conf.getInt(JobContext.END_NOTIFICATION_RETRIES, 0) + 1;
- long retryInterval = conf.getInt(JobContext.END_NOTIFICATION_RETRIE_INTERVAL, 30000);
+ int retryAttempts = conf.getInt(JobContext.MR_JOB_END_RETRY_ATTEMPTS, 0) + 1;
+ long retryInterval = conf.getInt(JobContext.MR_JOB_END_RETRY_INTERVAL, 30000);
if (uri.contains("$jobId")) {
uri = uri.replace("$jobId", status.getJobID().toString());
}
diff --git a/hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobQueueClient.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
similarity index 100%
rename from hadoop-mapreduce-project/src/java/org/apache/hadoop/mapred/JobQueueClient.java
rename to hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/JobQueueClient.java
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
index 3d7363e5faa..1cc784b29b3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MRConstants.java
@@ -60,4 +60,9 @@ public interface MRConstants {
/** Used in MRv1, mostly in TaskTracker code **/
public static final String WORKDIR = "work";
+
+ /** Used only by MRv2 */
+ public static final String APPLICATION_ATTEMPT_ID =
+ "mapreduce.job.application.attempt.id";
+
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
index 01c0b1bba4c..7c47aa91d51 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
@@ -21,6 +21,7 @@ package org.apache.hadoop.mapred;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.DataOutputStream;
+import java.io.File;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
@@ -36,8 +37,10 @@ import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FileSystem.Statistics;
+import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.RawComparator;
import org.apache.hadoop.io.SequenceFile;
@@ -1727,10 +1730,10 @@ class MapTask extends Task {
finalOutFileSize += rfs.getFileStatus(filename[i]).getLen();
}
if (numSpills == 1) { //the spill is the final output
- rfs.rename(filename[0],
+ sameVolRename(filename[0],
mapOutputFile.getOutputFileForWriteInVolume(filename[0]));
if (indexCacheList.size() == 0) {
- rfs.rename(mapOutputFile.getSpillIndexFile(0),
+ sameVolRename(mapOutputFile.getSpillIndexFile(0),
mapOutputFile.getOutputIndexFileForWriteInVolume(filename[0]));
} else {
indexCacheList.get(0).writeToFile(
@@ -1847,7 +1850,29 @@ class MapTask extends Task {
}
}
}
-
+
+ /**
+ * Rename srcPath to dstPath on the same volume. This is the same
+ * as RawLocalFileSystem's rename method, except that it will not
+ * fall back to a copy, and it will create the target directory
+ * if it doesn't exist.
+ */
+ private void sameVolRename(Path srcPath,
+ Path dstPath) throws IOException {
+ RawLocalFileSystem rfs = (RawLocalFileSystem)this.rfs;
+ File src = rfs.pathToFile(srcPath);
+ File dst = rfs.pathToFile(dstPath);
+ if (!dst.getParentFile().exists()) {
+ if (!dst.getParentFile().mkdirs()) {
+ throw new IOException("Unable to rename " + src + " to "
+ + dst + ": couldn't create parent directory");
+ }
+ }
+
+ if (!src.renameTo(dst)) {
+ throw new IOException("Unable to rename " + src + " to " + dst);
+ }
+ }
} // MapOutputBuffer
/**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java
index e2ab5fe3b9f..2a14755930b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Master.java
@@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.MRConfig;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.yarn.conf.YarnConfiguration;
public class Master {
@@ -33,20 +34,35 @@ public class Master {
}
public static String getMasterUserName(Configuration conf) {
- return conf.get(MRConfig.MASTER_USER_NAME);
+ String framework = conf.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
+ if (framework.equals(MRConfig.CLASSIC_FRAMEWORK_NAME)) {
+ return conf.get(MRConfig.MASTER_USER_NAME);
+ }
+ else {
+ return conf.get(YarnConfiguration.RM_PRINCIPAL);
+ }
}
public static InetSocketAddress getMasterAddress(Configuration conf) {
- String jobTrackerStr =
- conf.get(MRConfig.MASTER_ADDRESS, "localhost:8012");
- return NetUtils.createSocketAddr(jobTrackerStr);
+ String masterAddress;
+ String framework = conf.get(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
+ if (framework.equals(MRConfig.CLASSIC_FRAMEWORK_NAME)) {
+ masterAddress = conf.get(MRConfig.MASTER_ADDRESS, "localhost:8012");
+ return NetUtils.createSocketAddr(masterAddress, 8012, MRConfig.MASTER_ADDRESS);
+ }
+ else {
+ masterAddress = conf.get(YarnConfiguration.RM_ADDRESS,
+ YarnConfiguration.DEFAULT_RM_ADDRESS);
+ return NetUtils.createSocketAddr(masterAddress, YarnConfiguration.DEFAULT_RM_PORT,
+ YarnConfiguration.RM_ADDRESS);
+ }
}
public static String getMasterPrincipal(Configuration conf)
throws IOException {
- String jtHostname = getMasterAddress(conf).getHostName();
- // get jobtracker principal for use as delegation token renewer
- return SecurityUtil.getServerPrincipal(getMasterUserName(conf), jtHostname);
+ String masterHostname = getMasterAddress(conf).getHostName();
+ // get kerberos principal for use as delegation token renewer
+ return SecurityUtil.getServerPrincipal(getMasterUserName(conf), masterHostname);
}
}
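
A sketch of the two configuration shapes the rewritten lookups distinguish; the hostnames, ports and principals below are illustrative:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapred.Master;
    import org.apache.hadoop.mapreduce.MRConfig;
    import org.apache.hadoop.yarn.conf.YarnConfiguration;

    public class MasterLookupSketch {
      public static void main(String[] args) throws IOException {
        // Classic (MRv1): principal and address still come from the mapreduce.* keys.
        Configuration classic = new Configuration();
        classic.set(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
        classic.set(MRConfig.MASTER_ADDRESS, "jt.example.com:8012");
        classic.set(MRConfig.MASTER_USER_NAME, "mapred/_HOST@EXAMPLE.COM");

        // YARN (the default assumed by the new code): the ResourceManager
        // address and principal are used for the delegation-token renewer.
        Configuration yarn = new Configuration();
        yarn.set(MRConfig.FRAMEWORK_NAME, MRConfig.YARN_FRAMEWORK_NAME);
        yarn.set(YarnConfiguration.RM_ADDRESS, "rm.example.com:8032");
        yarn.set(YarnConfiguration.RM_PRINCIPAL, "yarn/_HOST@EXAMPLE.COM");

        InetSocketAddress jt = Master.getMasterAddress(classic);
        InetSocketAddress rm = Master.getMasterAddress(yarn);
        String renewer = Master.getMasterPrincipal(yarn);
        System.out.println(jt + " / " + rm + " / " + renewer);
      }
    }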
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCommitter.java
index efe784d8412..60fd7f99adc 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/OutputCommitter.java
@@ -146,6 +146,33 @@ public abstract class OutputCommitter
public abstract void abortTask(TaskAttemptContext taskContext)
throws IOException;
+ /**
+ * Is task output recovery supported for restarting jobs? The default is
+ * false; a committer that implements {@link #recoverTask(TaskAttemptContext)}
+ * should override this to return true.
+ */
+ @Override
+ public boolean isRecoverySupported() {
+ return false;
+ }
+
+ /**
+ * Recover the task output.
+ *
+ * The retry-count for the job will be passed via the
+ * {@link MRConstants#APPLICATION_ATTEMPT_ID} key in
+ * {@link TaskAttemptContext#getConfiguration()} for the
+ * OutputCommitter.
+ *
+ * If an exception is thrown the task will be attempted again.
+ *
+ * @param taskContext Context of the task whose output is being recovered
+ * @throws IOException
+ */
+ public void recoverTask(TaskAttemptContext taskContext)
+ throws IOException {
+ }
+
/**
* This method implements the new interface by calling the old method. Note
* that the input types are different between the new and old apis and this
@@ -246,4 +273,17 @@ public abstract class OutputCommitter
) throws IOException {
abortTask((TaskAttemptContext) taskContext);
}
+
+ /**
+ * This method implements the new interface by calling the old method. Note
+ * that the input types are different between the new and old apis and this
+ * is a bridge between the two.
+ */
+ @Override
+ public final
+ void recoverTask(org.apache.hadoop.mapreduce.TaskAttemptContext taskContext
+ ) throws IOException {
+ recoverTask((TaskAttemptContext) taskContext);
+ }
+
}
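
A minimal sketch of a committer opting into the new recovery hook; the recovery work itself is application-specific and only hinted at here, and FileOutputCommitter above is the concrete in-tree example this sketch builds on:

    import java.io.IOException;
    import org.apache.hadoop.mapred.FileOutputCommitter;
    import org.apache.hadoop.mapred.MRConstants;
    import org.apache.hadoop.mapred.TaskAttemptContext;

    public class RecoveringCommitterSketch extends FileOutputCommitter {
      @Override
      public boolean isRecoverySupported() {
        return true;  // allow the framework to call recoverTask() after an AM restart
      }

      @Override
      public void recoverTask(TaskAttemptContext context) throws IOException {
        // The restarted job's application attempt id arrives via the configuration.
        int currentAttempt = context.getJobConf()
            .getInt(MRConstants.APPLICATION_ATTEMPT_ID, 0);
        if (currentAttempt == 0) {
          return;  // nothing to recover for the first attempt
        }
        // Reuse FileOutputCommitter's promotion of the previous attempt's output,
        // then perform any extra application-specific fix-up here.
        super.recoverTask(context);
      }
    }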
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
index f278c8528ce..969ddf17e31 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/ReduceTask.java
@@ -342,10 +342,14 @@ public class ReduceTask extends Task {
RawKeyValueIterator rIter = null;
boolean isLocal = false;
- // local iff framework == classic && master address == local
- String framework = job.get(MRConfig.FRAMEWORK_NAME, MRConfig.CLASSIC_FRAMEWORK_NAME);
- if (framework.equals(MRConfig.CLASSIC_FRAMEWORK_NAME)) {
- isLocal = "local".equals(job.get(MRConfig.MASTER_ADDRESS, "local"));
+ // local if
+ // 1) framework == local or
+ // 2) framework == null and job tracker address == local
+ String framework = job.get(MRConfig.FRAMEWORK_NAME);
+ String masterAddr = job.get(MRConfig.MASTER_ADDRESS, "local");
+ if ((framework == null && masterAddr.equals("local"))
+ || (framework != null && framework.equals(MRConfig.LOCAL_FRAMEWORK_NAME))) {
+ isLocal = true;
}
if (!isLocal) {
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
index 60b711be9a2..29ce4822b76 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/Task.java
@@ -1119,7 +1119,7 @@ abstract public class Task implements Writable, Configurable {
// delete the staging area for the job
JobConf conf = new JobConf(jobContext.getConfiguration());
if (!keepTaskFiles(conf)) {
- String jobTempDir = conf.get("mapreduce.job.dir");
+ String jobTempDir = conf.get(MRJobConfig.MAPREDUCE_JOB_DIR);
Path jobTempDirPath = new Path(jobTempDir);
FileSystem fs = jobTempDirPath.getFileSystem(conf);
fs.delete(jobTempDirPath, true);
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
index d60b64a4097..0b79837f62d 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/TaskLogAppender.java
@@ -93,7 +93,9 @@ public class TaskLogAppender extends FileAppender {
}
public void flush() {
- qw.flush();
+ if (qw != null) {
+ qw.flush();
+ }
}
@Override
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
index 33d5f81b4fc..460202167dd 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/Cluster.java
@@ -25,6 +25,8 @@ import java.util.ArrayList;
import java.util.List;
import java.util.ServiceLoader;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@@ -38,6 +40,7 @@ import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.util.ConfigUtil;
+import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.UserGroupInformation;
@@ -62,7 +65,11 @@ public class Cluster {
private Path sysDir = null;
private Path stagingAreaDir = null;
private Path jobHistoryDir = null;
+ private static final Log LOG = LogFactory.getLog(Cluster.class);
+ private static ServiceLoader<ClientProtocolProvider> frameworkLoader =
+ ServiceLoader.load(ClientProtocolProvider.class);
+
static {
ConfigUtil.loadResources();
}
@@ -81,19 +88,34 @@ public class Cluster {
private void initialize(InetSocketAddress jobTrackAddr, Configuration conf)
throws IOException {
- for (ClientProtocolProvider provider : ServiceLoader
- .load(ClientProtocolProvider.class)) {
- ClientProtocol clientProtocol = null;
- if (jobTrackAddr == null) {
- clientProtocol = provider.create(conf);
- } else {
- clientProtocol = provider.create(jobTrackAddr, conf);
- }
+ synchronized (frameworkLoader) {
+ for (ClientProtocolProvider provider : frameworkLoader) {
+ LOG.debug("Trying ClientProtocolProvider : "
+ + provider.getClass().getName());
+ ClientProtocol clientProtocol = null;
+ try {
+ if (jobTrackAddr == null) {
+ clientProtocol = provider.create(conf);
+ } else {
+ clientProtocol = provider.create(jobTrackAddr, conf);
+ }
- if (clientProtocol != null) {
- clientProtocolProvider = provider;
- client = clientProtocol;
- break;
+ if (clientProtocol != null) {
+ clientProtocolProvider = provider;
+ client = clientProtocol;
+ LOG.debug("Picked " + provider.getClass().getName()
+ + " as the ClientProtocolProvider");
+ break;
+ }
+ else {
+ LOG.info("Cannot pick " + provider.getClass().getName()
+ + " as the ClientProtocolProvider - returned null protocol");
+ }
+ }
+ catch (Exception e) {
+ LOG.info("Failed to use " + provider.getClass().getName()
+ + " due to error: " + e.getMessage());
+ }
}
}
@@ -191,7 +213,20 @@ public class Cluster {
throws IOException, InterruptedException {
return client.getQueue(name);
}
-
+
+ /**
+ * Get log parameters for the specified jobID or taskAttemptID
+ * @param jobID the job id.
+ * @param taskAttemptID the task attempt id. Optional.
+ * @return the LogParams
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public LogParams getLogParams(JobID jobID, TaskAttemptID taskAttemptID)
+ throws IOException, InterruptedException {
+ return client.getLogFileParams(jobID, taskAttemptID);
+ }
+
/**
* Get current cluster status.
*
@@ -371,6 +406,7 @@ public class Cluster {
* @return the new expiration time
* @throws InvalidToken
* @throws IOException
+ * @deprecated Use {@link Token#renew} instead
*/
public long renewDelegationToken(Token<DelegationTokenIdentifier> token
) throws InvalidToken, IOException,
@@ -387,6 +423,7 @@ public class Cluster {
* Cancel a delegation token from the JobTracker
* @param token the token to cancel
* @throws IOException
+ * @deprecated Use {@link Token#cancel} instead
*/
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
) throws IOException,
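
For context on the provider loop above: providers are discovered through java.util.ServiceLoader, so a framework ships a class like the sketch below and lists its fully-qualified name in META-INF/services/org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider, exactly as the LocalClientProtocolProvider entry added earlier in this patch. Returning null tells Cluster.initialize() to keep trying the remaining providers. The class name and framework value below are illustrative:

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRConfig;
    import org.apache.hadoop.mapreduce.protocol.ClientProtocol;
    import org.apache.hadoop.mapreduce.protocol.ClientProtocolProvider;

    public class MyClientProtocolProvider extends ClientProtocolProvider {

      @Override
      public ClientProtocol create(Configuration conf) throws IOException {
        if (!"myframework".equals(conf.get(MRConfig.FRAMEWORK_NAME))) {
          return null;  // not ours; let the next provider try
        }
        return null;    // would construct and return this framework's ClientProtocol
      }

      @Override
      public ClientProtocol create(InetSocketAddress addr, Configuration conf)
          throws IOException {
        return create(conf);
      }

      @Override
      public void close(ClientProtocol clientProtocol) throws IOException {
      }
    }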
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ContextFactory.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ContextFactory.java
index 1b1a85b7af4..51adf750d70 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ContextFactory.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/ContextFactory.java
@@ -123,7 +123,7 @@ public class ContextFactory {
WRAPPED_CONTEXT_FIELD = null;
}
MAP_CONTEXT_CONSTRUCTOR.setAccessible(true);
- REPORTER_FIELD = taskIOContextCls.getDeclaredField("reporter");
+ REPORTER_FIELD = taskContextCls.getDeclaredField("reporter");
REPORTER_FIELD.setAccessible(true);
READER_FIELD = mapContextCls.getDeclaredField("reader");
READER_FIELD.setAccessible(true);
@@ -141,7 +141,8 @@ public class ContextFactory {
}
/**
- * Clone a job or task attempt context with a new configuration.
+ * Clone a {@link JobContext} or {@link TaskAttemptContext} with a
+ * new configuration.
* @param original the original context
* @param conf the new configuration
* @return a new context object
@@ -176,7 +177,8 @@ public class ContextFactory {
}
/**
- * Copy a mapper context, optionally replacing the input and output.
+ * Copy a custom WrappedMapper.Context, optionally replacing
+ * the input and output.
* @param <K1> input key type
* @param <V1> input value type
* @param <K2> output key type
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
index 6f57f1733ad..6edda66ca15 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobStatus.java
@@ -92,6 +92,11 @@ public class JobStatus implements Writable, Cloneable {
private boolean isRetired;
private String historyFile = "";
private String trackingUrl ="";
+ private int numUsedSlots;
+ private int numReservedSlots;
+ private int usedMem;
+ private int reservedMem;
+ private int neededMem;
/**
@@ -487,6 +492,76 @@ public class JobStatus implements Writable, Cloneable {
return historyFile;
}
+ /**
+ * @return number of used mapred slots
+ */
+ public int getNumUsedSlots() {
+ return numUsedSlots;
+ }
+
+ /**
+ * @param n number of used mapred slots
+ */
+ public void setNumUsedSlots(int n) {
+ numUsedSlots = n;
+ }
+
+ /**
+ * @return the number of reserved slots
+ */
+ public int getNumReservedSlots() {
+ return numReservedSlots;
+ }
+
+ /**
+ * @param n the number of reserved slots
+ */
+ public void setNumReservedSlots(int n) {
+ this.numReservedSlots = n;
+ }
+
+ /**
+ * @return the used memory
+ */
+ public int getUsedMem() {
+ return usedMem;
+ }
+
+ /**
+ * @param m the used memory
+ */
+ public void setUsedMem(int m) {
+ this.usedMem = m;
+ }
+
+ /**
+ * @return the reserved memory
+ */
+ public int getReservedMem() {
+ return reservedMem;
+ }
+
+ /**
+ * @param r the reserved memory
+ */
+ public void setReservedMem(int r) {
+ this.reservedMem = r;
+ }
+
+ /**
+ * @return the needed memory
+ */
+ public int getNeededMem() {
+ return neededMem;
+ }
+
+ /**
+ * @param n the needed memory
+ */
+ public void setNeededMem(int n) {
+ this.neededMem = n;
+ }
+
public String toString() {
StringBuffer buffer = new StringBuffer();
buffer.append("job-id : " + jobid);
@@ -499,6 +574,11 @@ public class JobStatus implements Writable, Cloneable {
buffer.append("user-name : " + user);
buffer.append("priority : " + priority);
buffer.append("scheduling-info : " + schedulingInfo);
+ buffer.append("num-used-slots : " + numUsedSlots);
+ buffer.append("num-reserved-slots : " + numReservedSlots);
+ buffer.append("used-mem : " + usedMem);
+ buffer.append("reserved-mem : " + reservedMem);
+ buffer.append("needed-mem : " + neededMem);
return buffer.toString();
}
}
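
The JobStatus hunk above adds slot and memory accounting to the client-visible job status. Below is a minimal sketch, not part of the patch, of how code that already holds a JobStatus could read the new fields; the class and method names are illustrative only.

    import org.apache.hadoop.mapreduce.JobStatus;

    public class JobResourceSummary {
      /** Formats the slot/memory fields introduced by the JobStatus change. */
      public static String summarize(JobStatus status) {
        return "usedSlots=" + status.getNumUsedSlots()
            + ", reservedSlots=" + status.getNumReservedSlots()
            + ", usedMem=" + status.getUsedMem()
            + ", reservedMem=" + status.getReservedMem()
            + ", neededMem=" + status.getNeededMem();
      }
    }
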
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
index 2224cb967f5..f63352b4dd1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/JobSubmitter.java
@@ -322,6 +322,9 @@ class JobSubmitter {
JobStatus submitJobInternal(Job job, Cluster cluster)
throws ClassNotFoundException, InterruptedException, IOException {
+ //validate the job's output specs
+ checkSpecs(job);
+
Path jobStagingArea = JobSubmissionFiles.getStagingDir(cluster,
job.getConfiguration());
//configure the command line options correctly on the submitting dfs
@@ -338,7 +341,9 @@ class JobSubmitter {
Path submitJobDir = new Path(jobStagingArea, jobId.toString());
JobStatus status = null;
try {
- conf.set("mapreduce.job.dir", submitJobDir.toString());
+ conf.set("hadoop.http.filter.initializers",
+ "org.apache.hadoop.yarn.server.webproxy.amfilter.AmFilterInitializer");
+ conf.set(MRJobConfig.MAPREDUCE_JOB_DIR, submitJobDir.toString());
LOG.debug("Configuring job " + jobId + " with " + submitJobDir
+ " as the submit dir");
// get delegation token for the dir
@@ -349,8 +354,6 @@ class JobSubmitter {
copyAndConfigureFiles(job, submitJobDir);
Path submitJobFile = JobSubmissionFiles.getJobConfPath(submitJobDir);
-
- checkSpecs(job);
// Create the splits for the job
LOG.debug("Creating splits at " + jtFs.makeQualified(submitJobDir));
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
index 2a9823c3854..4516cb9eda4 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRConfig.java
@@ -67,6 +67,7 @@ public interface MRConfig {
public static final String FRAMEWORK_NAME = "mapreduce.framework.name";
public static final String CLASSIC_FRAMEWORK_NAME = "classic";
public static final String YARN_FRAMEWORK_NAME = "yarn";
+ public static final String LOCAL_FRAMEWORK_NAME = "local";
public static final String TASK_LOCAL_OUTPUT_CLASS =
"mapreduce.task.local.output.class";
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
index a3e5a6cf615..769d842c607 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/MRJobConfig.java
@@ -17,6 +17,7 @@
*/
package org.apache.hadoop.mapreduce;
+import org.apache.hadoop.util.PlatformName;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@@ -90,12 +91,6 @@ public interface MRJobConfig {
public static final String WORKING_DIR = "mapreduce.job.working.dir";
- public static final String END_NOTIFICATION_URL = "mapreduce.job.end-notification.url";
-
- public static final String END_NOTIFICATION_RETRIES = "mapreduce.job.end-notification.retry.attempts";
-
- public static final String END_NOTIFICATION_RETRIE_INTERVAL = "mapreduce.job.end-notification.retry.interval";
-
public static final String CLASSPATH_ARCHIVES = "mapreduce.job.classpath.archives";
public static final String CLASSPATH_FILES = "mapreduce.job.classpath.files";
@@ -237,6 +232,8 @@ public interface MRJobConfig {
public static final String REDUCE_JAVA_OPTS = "mapreduce.reduce.java.opts";
public static final String REDUCE_ULIMIT = "mapreduce.reduce.ulimit";
+
+ public static final String MAPREDUCE_JOB_DIR = "mapreduce.job.dir";
public static final String REDUCE_MAX_ATTEMPTS = "mapreduce.reduce.maxattempts";
@@ -272,7 +269,12 @@ public interface MRJobConfig {
public static final String JOB_ACL_VIEW_JOB = "mapreduce.job.acl-view-job";
+ public static final String DEFAULT_JOB_ACL_VIEW_JOB = " ";
+
public static final String JOB_ACL_MODIFY_JOB = "mapreduce.job.acl-modify-job";
+
+ public static final String DEFAULT_JOB_ACL_MODIFY_JOB = " ";
+
public static final String JOB_SUBMITHOST =
"mapreduce.job.submithostname";
public static final String JOB_SUBMITHOSTADDR =
@@ -323,9 +325,9 @@ public interface MRJobConfig {
public static final String DEFAULT_MR_AM_COMMAND_OPTS = "-Xmx1536m";
/** Root Logging level passed to the MR app master.*/
- public static final String MR_AM_LOG_OPTS =
- MR_AM_PREFIX+"log-opts";
- public static final String DEFAULT_MR_AM_LOG_OPTS = "INFO";
+ public static final String MR_AM_LOG_LEVEL =
+ MR_AM_PREFIX+"log.level";
+ public static final String DEFAULT_MR_AM_LOG_LEVEL = "INFO";
/**The number of splits when reporting progress in MR*/
public static final String MR_AM_NUM_PROGRESS_SPLITS =
@@ -384,11 +386,11 @@ public interface MRJobConfig {
MR_AM_PREFIX
+ "job.task.estimator.exponential.smooth.lambda-ms";
- public static final long DEFAULT_MR_AM_TASK_ESTIMATOR_SMNOOTH_LAMBDA_MS =
+ public static final long DEFAULT_MR_AM_TASK_ESTIMATOR_SMOOTH_LAMBDA_MS =
1000L * 60;
/** true if the smoothing rate should be exponential.*/
- public static final String MR_AM_TASK_EXTIMATOR_EXPONENTIAL_RATE_ENABLE =
+ public static final String MR_AM_TASK_ESTIMATOR_EXPONENTIAL_RATE_ENABLE =
MR_AM_PREFIX + "job.task.estimator.exponential.smooth.rate";
/** The number of threads used to handle task RPC calls.*/
@@ -401,6 +403,15 @@ public interface MRJobConfig {
MR_AM_PREFIX + "scheduler.heartbeat.interval-ms";
public static final int DEFAULT_MR_AM_TO_RM_HEARTBEAT_INTERVAL_MS = 2000;
+ /**
+ * If contact with the RM is lost, the AM will wait
+ * MR_AM_TO_RM_WAIT_INTERVAL_MS milliseconds before aborting. During this
+ * interval, the AM will keep trying to contact the RM.
+ */
+ public static final String MR_AM_TO_RM_WAIT_INTERVAL_MS =
+ MR_AM_PREFIX + "scheduler.connection.wait.interval-ms";
+ public static final int DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS = 360000;
+
/**
* Boolean. Create the base dirs in the JobHistoryEventHandler
* Set to false for multi-user clusters. This is an internal config that
@@ -428,7 +439,7 @@ public interface MRJobConfig {
"mapreduce.admin.user.env";
public static final String DEFAULT_MAPRED_ADMIN_USER_ENV =
- "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib";
+ "LD_LIBRARY_PATH=$HADOOP_COMMON_HOME/lib/native/" + PlatformName.getPlatformName();
public static final String WORKDIR = "work";
@@ -436,10 +447,13 @@ public interface MRJobConfig {
public static final String HADOOP_WORK_DIR = "HADOOP_WORK_DIR";
+ // Environment variables used by Pipes. (TODO: these
+ // do not appear to be used by current pipes source code!)
public static final String STDOUT_LOGFILE_ENV = "STDOUT_LOGFILE_ENV";
-
public static final String STDERR_LOGFILE_ENV = "STDERR_LOGFILE_ENV";
+ public static final String APPLICATION_ATTEMPT_ID_ENV = "APPLICATION_ATTEMPT_ID_ENV";
+
// This should be the directory where splits file gets localized on the node
// running ApplicationMaster.
public static final String JOB_SUBMIT_DIR = "jobSubmitDir";
@@ -475,4 +489,33 @@ public interface MRJobConfig {
public static final String APPLICATION_ATTEMPT_ID =
"mapreduce.job.application.attempt.id";
+
+ /**
+ * Job end notification.
+ */
+ public static final String MR_JOB_END_NOTIFICATION_URL =
+ "mapreduce.job.end-notification.url";
+
+ public static final String MR_JOB_END_RETRY_ATTEMPTS =
+ "mapreduce.job.end-notification.retry.attempts";
+
+ public static final String MR_JOB_END_RETRY_INTERVAL =
+ "mapreduce.job.end-notification.retry.interval";
+
+ public static final String MR_JOB_END_NOTIFICATION_MAX_ATTEMPTS =
+ "mapreduce.job.end-notification.max.attempts";
+
+ public static final String MR_JOB_END_NOTIFICATION_MAX_RETRY_INTERVAL =
+ "mapreduce.job.end-notification.max.retry.interval";
+
+ /*
+ * MR AM Service Authorization
+ */
+ public static final String
+ MR_AM_SECURITY_SERVICE_AUTHORIZATION_TASK_UMBILICAL =
+ "security.job.task.protocol.acl";
+ public static final String
+ MR_AM_SECURITY_SERVICE_AUTHORIZATION_CLIENT =
+ "security.job.client.protocol.acl";
+
}
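
The MRJobConfig hunks above rename the job end-notification keys and add the AM-to-RM wait interval. A hedged sketch of how client or AM code might read these keys through the standard Configuration getters follows; the class name and the fallback values passed to the getters are assumptions made for the example, not values taken from the patch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.mapreduce.MRJobConfig;

    public class NotificationConfigReader {
      public static void dump(Configuration conf) {
        // Job end notification URL; null if the job did not configure one.
        String url = conf.get(MRJobConfig.MR_JOB_END_NOTIFICATION_URL);
        // Retry settings; the fallback values here are arbitrary.
        int retries = conf.getInt(MRJobConfig.MR_JOB_END_RETRY_ATTEMPTS, 0);
        long interval = conf.getLong(MRJobConfig.MR_JOB_END_RETRY_INTERVAL, 30000L);
        // How long the AM keeps retrying the RM before it gives up.
        int rmWait = conf.getInt(MRJobConfig.MR_AM_TO_RM_WAIT_INTERVAL_MS,
            MRJobConfig.DEFAULT_MR_AM_TO_RM_WAIT_INTERVAL_MS);
        System.out.println("url=" + url + ", retries=" + retries
            + ", interval=" + interval + ", rmWaitMs=" + rmWait);
      }
    }
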
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AMStartedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AMStartedEvent.java
new file mode 100644
index 00000000000..1e7ce4c7746
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/AMStartedEvent.java
@@ -0,0 +1,125 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.ConverterUtils;
+
+import org.apache.avro.util.Utf8;
+
+/**
+ * Event to record the start of an MR AppMaster
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class AMStartedEvent implements HistoryEvent {
+ private AMStarted datum = new AMStarted();
+
+ /**
+ * Create an event to record the start of an MR AppMaster
+ *
+ * @param appAttemptId
+ * the application attempt id.
+ * @param startTime
+ * the start time of the AM.
+ * @param containerId
+ * the containerId of the AM.
+ * @param nodeManagerHost
+ * the node on which the AM is running.
+ * @param nodeManagerPort
+ * the port on which the AM is running.
+ * @param nodeManagerHttpPort
+ * the httpPort for the node running the AM.
+ */
+ public AMStartedEvent(ApplicationAttemptId appAttemptId, long startTime,
+ ContainerId containerId, String nodeManagerHost, int nodeManagerPort,
+ int nodeManagerHttpPort) {
+ datum.applicationAttemptId = new Utf8(appAttemptId.toString());
+ datum.startTime = startTime;
+ datum.containerId = new Utf8(containerId.toString());
+ datum.nodeManagerHost = new Utf8(nodeManagerHost);
+ datum.nodeManagerPort = nodeManagerPort;
+ datum.nodeManagerHttpPort = nodeManagerHttpPort;
+ }
+
+ AMStartedEvent() {
+ }
+
+ public Object getDatum() {
+ return datum;
+ }
+
+ public void setDatum(Object datum) {
+ this.datum = (AMStarted) datum;
+ }
+
+ /**
+ * @return the ApplicationAttemptId
+ */
+ public ApplicationAttemptId getAppAttemptId() {
+ return ConverterUtils.toApplicationAttemptId(datum.applicationAttemptId
+ .toString());
+ }
+
+ /**
+ * @return the start time for the MRAppMaster
+ */
+ public long getStartTime() {
+ return datum.startTime;
+ }
+
+ /**
+ * @return the ContainerId for the MRAppMaster.
+ */
+ public ContainerId getContainerId() {
+ return ConverterUtils.toContainerId(datum.containerId.toString());
+ }
+
+ /**
+ * @return the node manager host.
+ */
+ public String getNodeManagerHost() {
+ return datum.nodeManagerHost.toString();
+ }
+
+ /**
+ * @return the node manager port.
+ */
+ public int getNodeManagerPort() {
+ return datum.nodeManagerPort;
+ }
+
+ /**
+ * @return the http port for the node manager running the AM.
+ */
+ public int getNodeManagerHttpPort() {
+ return datum.nodeManagerHttpPort;
+ }
+
+ /** Get the event type */
+
+ @Override
+ public EventType getEventType() {
+ return EventType.AM_STARTED;
+ }
+}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
index 6f86516ee7a..5d74b802189 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventReader.java
@@ -33,7 +33,6 @@ import org.apache.hadoop.mapreduce.Counters;
import org.apache.avro.Schema;
import org.apache.avro.io.Decoder;
import org.apache.avro.io.DecoderFactory;
-import org.apache.avro.io.JsonDecoder;
import org.apache.avro.io.DatumReader;
import org.apache.avro.specific.SpecificDatumReader;
@@ -146,8 +145,10 @@ public class EventReader implements Closeable {
result = new TaskAttemptUnsuccessfulCompletionEvent(); break;
case CLEANUP_ATTEMPT_KILLED:
result = new TaskAttemptUnsuccessfulCompletionEvent(); break;
+ case AM_STARTED:
+ result = new AMStartedEvent(); break;
default:
- throw new RuntimeException("unexpected event type!");
+ throw new RuntimeException("unexpected event type: " + wrapper.type);
}
result.setDatum(wrapper.event);
return result;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
index 31cf22eddd4..b953da1f97a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/EventWriter.java
@@ -25,7 +25,6 @@ import org.apache.avro.Schema;
import org.apache.avro.io.DatumWriter;
import org.apache.avro.io.Encoder;
import org.apache.avro.io.EncoderFactory;
-import org.apache.avro.io.JsonEncoder;
import org.apache.avro.specific.SpecificDatumWriter;
import org.apache.avro.util.Utf8;
import org.apache.commons.logging.Log;
@@ -72,6 +71,7 @@ class EventWriter {
void flush() throws IOException {
encoder.flush();
out.flush();
+ out.hflush();
}
void close() throws IOException {
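
The EventWriter change adds an hflush() after the plain flush() so that history events become visible to readers while the file is still open. A small sketch of the same flush-then-hflush pattern on an FSDataOutputStream, assuming a default FileSystem and a placeholder path:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HflushSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // Placeholder location for the example.
        FSDataOutputStream out = fs.create(new Path("/tmp/history-events.bin"));
        out.writeBytes("event-record\n");
        out.flush();   // drain local buffers into the stream
        out.hflush();  // ask the filesystem to expose the bytes to concurrent readers
        out.close();
      }
    }
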
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java
index 2dda8f70647..a30748cd651 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/HistoryEvent.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.IOException;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
index fe92cfe3769..e6dd5c10b2b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobHistoryParser.java
@@ -20,6 +20,8 @@ package org.apache.hadoop.mapreduce.jobhistory;
import java.io.IOException;
import java.util.HashMap;
+import java.util.LinkedList;
+import java.util.List;
import java.util.Map;
import org.apache.hadoop.classification.InterfaceAudience;
@@ -37,6 +39,8 @@ import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapred.TaskStatus;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.security.authorize.AccessControlList;
+import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
+import org.apache.hadoop.yarn.api.records.ContainerId;
/**
* Default Parser for the JobHistory files. Typical usage is
@@ -174,6 +178,9 @@ public class JobHistoryParser {
case CLEANUP_ATTEMPT_FINISHED:
handleTaskAttemptFinishedEvent((TaskAttemptFinishedEvent) event);
break;
+ case AM_STARTED:
+ handleAMStartedEvent((AMStartedEvent) event);
+ break;
default:
break;
}
@@ -202,6 +209,7 @@ public class JobHistoryParser {
attemptInfo.sortFinishTime = event.getSortFinishTime();
attemptInfo.counters = event.getCounters();
attemptInfo.hostname = event.getHostname();
+ attemptInfo.rackname = event.getRackName();
}
private void handleMapAttemptFinishedEvent(MapAttemptFinishedEvent event) {
@@ -214,6 +222,7 @@ public class JobHistoryParser {
attemptInfo.mapFinishTime = event.getMapFinishTime();
attemptInfo.counters = event.getCounters();
attemptInfo.hostname = event.getHostname();
+ attemptInfo.rackname = event.getRackname();
}
private void handleTaskAttemptFailedEvent(
@@ -240,6 +249,8 @@ public class JobHistoryParser {
attemptInfo.httpPort = event.getHttpPort();
attemptInfo.trackerName = event.getTrackerName();
attemptInfo.taskType = event.getTaskType();
+ attemptInfo.shufflePort = event.getShufflePort();
+ attemptInfo.containerId = event.getContainerId();
taskInfo.attemptsMap.put(attemptId, attemptInfo);
}
@@ -304,6 +315,21 @@ public class JobHistoryParser {
info.totalReduces = event.getTotalReduces();
info.uberized = event.getUberized();
}
+
+ private void handleAMStartedEvent(AMStartedEvent event) {
+ AMInfo amInfo = new AMInfo();
+ amInfo.appAttemptId = event.getAppAttemptId();
+ amInfo.startTime = event.getStartTime();
+ amInfo.containerId = event.getContainerId();
+ amInfo.nodeManagerHost = event.getNodeManagerHost();
+ amInfo.nodeManagerPort = event.getNodeManagerPort();
+ amInfo.nodeManagerHttpPort = event.getNodeManagerHttpPort();
+ if (info.amInfos == null) {
+ info.amInfos = new LinkedList<AMInfo>();
+ }
+ info.amInfos.add(amInfo);
+ info.latestAmInfo = amInfo;
+ }
private void handleJobInfoChangeEvent(JobInfoChangeEvent event) {
info.submitTime = event.getSubmitTime();
@@ -347,6 +373,8 @@ public class JobHistoryParser {
Map<JobACL, AccessControlList> jobACLs;
Map<TaskID, TaskInfo> tasksMap;
+ List<AMInfo> amInfos;
+ AMInfo latestAmInfo;
boolean uberized;
/** Create a job info object where job information will be stored
@@ -376,7 +404,9 @@ public class JobHistoryParser {
System.out.println("REDUCE_COUNTERS:" + reduceCounters.toString());
System.out.println("TOTAL_COUNTERS: " + totalCounters.toString());
System.out.println("UBERIZED: " + uberized);
-
+ for (AMInfo amInfo : amInfos) {
+ amInfo.printAll();
+ }
for (TaskInfo ti: tasksMap.values()) {
ti.printAll();
}
@@ -426,6 +456,10 @@ public class JobHistoryParser {
public Map<JobACL, AccessControlList> getJobACLs() { return jobACLs; }
/** @return the uberized status of this job */
public boolean getUberized() { return uberized; }
+ /** @return the list of AMInfos for the job's AppMaster attempts */
+ public List<AMInfo> getAMInfos() { return amInfos; }
+ /** @return the AMInfo for the most recent AppMaster attempt */
+ public AMInfo getLatestAMInfo() { return latestAmInfo; }
}
/**
@@ -506,7 +540,10 @@ public class JobHistoryParser {
String trackerName;
Counters counters;
int httpPort;
+ int shufflePort;
String hostname;
+ String rackname;
+ ContainerId containerId;
/** Create a Task Attempt Info which will store attempt level information
* on a history parse.
@@ -514,8 +551,9 @@ public class JobHistoryParser {
public TaskAttemptInfo() {
startTime = finishTime = shuffleFinishTime = sortFinishTime =
mapFinishTime = -1;
- error = state = trackerName = hostname = "";
+ error = state = trackerName = hostname = rackname = "";
httpPort = -1;
+ shufflePort = -1;
}
/**
* Print all the information about this attempt.
@@ -530,6 +568,8 @@ public class JobHistoryParser {
System.out.println("TASK_TYPE:" + taskType);
System.out.println("TRACKER_NAME:" + trackerName);
System.out.println("HTTP_PORT:" + httpPort);
+ System.out.println("SHUFFLE_PORT:" + shufflePort);
+ System.out.println("CONTIANER_ID:" + containerId);
if (counters != null) {
System.out.println("COUNTERS:" + counters.toString());
}
@@ -559,9 +599,91 @@ public class JobHistoryParser {
public String getTrackerName() { return trackerName; }
/** @return the host name */
public String getHostname() { return hostname; }
+ /** @return the rack name */
+ public String getRackname() { return rackname; }
/** @return the counters for the attempt */
public Counters getCounters() { return counters; }
/** @return the HTTP port for the tracker */
public int getHttpPort() { return httpPort; }
+ /** @return the Shuffle port for the tracker */
+ public int getShufflePort() { return shufflePort; }
+ /** @return the ContainerId for the tracker */
+ public ContainerId getContainerId() { return containerId; }
}
+
+ /**
+ * Stores AM information
+ */
+ public static class AMInfo {
+ ApplicationAttemptId appAttemptId;
+ long startTime;
+ ContainerId containerId;
+ String nodeManagerHost;
+ int nodeManagerPort;
+ int nodeManagerHttpPort;
+
+ /**
+ * Create an AMInfo which will store AM level information on a history
+ * parse.
+ */
+ public AMInfo() {
+ startTime = -1;
+ nodeManagerHost = "";
+ nodeManagerHttpPort = -1;
+ }
+
+ public AMInfo(ApplicationAttemptId appAttemptId, long startTime,
+ ContainerId containerId, String nodeManagerHost, int nodeManagerPort,
+ int nodeManagerHttpPort) {
+ this.appAttemptId = appAttemptId;
+ this.startTime = startTime;
+ this.containerId = containerId;
+ this.nodeManagerHost = nodeManagerHost;
+ this.nodeManagerPort = nodeManagerPort;
+ this.nodeManagerHttpPort = nodeManagerHttpPort;
+ }
+
+ /**
+ * Print all the information about this AM.
+ */
+ public void printAll() {
+ System.out.println("APPLICATION_ATTEMPT_ID:" + appAttemptId.toString());
+ System.out.println("START_TIME: " + startTime);
+ System.out.println("CONTAINER_ID: " + containerId.toString());
+ System.out.println("NODE_MANAGER_HOST: " + nodeManagerHost);
+ System.out.println("NODE_MANAGER_PORT: " + nodeManagerPort);
+ System.out.println("NODE_MANAGER_HTTP_PORT: " + nodeManagerHttpPort);
+ }
+
+ /** @return the ApplicationAttemptId */
+ public ApplicationAttemptId getAppAttemptId() {
+ return appAttemptId;
+ }
+
+ /** @return the start time of the AM */
+ public long getStartTime() {
+ return startTime;
+ }
+
+ /** @return the container id for the AM */
+ public ContainerId getContainerId() {
+ return containerId;
+ }
+
+ /** @return the host name for the node manager on which the AM is running */
+ public String getNodeManagerHost() {
+ return nodeManagerHost;
+ }
+
+ /** @return the port for the node manager running the AM */
+ public int getNodeManagerPort() {
+ return nodeManagerPort;
+ }
+
+ /** @return the http port for the node manager running the AM */
+ public int getNodeManagerHttpPort() {
+ return nodeManagerHttpPort;
+ }
+ }
+
}
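
The JobHistoryParser hunks above record one AMInfo per ApplicationMaster attempt and expose them through getAMInfos()/getLatestAMInfo(). A hedged usage sketch, assuming the usual JobHistoryParser(fs, path) constructor and parse() call, with the history file path taken from the command line:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.AMInfo;
    import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.JobInfo;

    public class PrintAMInfos {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        JobHistoryParser parser = new JobHistoryParser(fs, new Path(args[0]));
        JobInfo info = parser.parse();
        if (info.getAMInfos() != null) {
          for (AMInfo am : info.getAMInfos()) {
            System.out.println(am.getAppAttemptId() + " started at "
                + am.getStartTime() + " on " + am.getNodeManagerHost()
                + " in container " + am.getContainerId());
          }
        }
      }
    }
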
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java
index 099941ec1ff..ed3ba1cbf5f 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobInitedEvent.java
@@ -18,8 +18,6 @@
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.IOException;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobID;
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
index aded4e966a5..a1c374f522b 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/JobUnsuccessfulCompletionEvent.java
@@ -18,14 +18,11 @@
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.IOException;
-
+import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobID;
-import org.apache.avro.util.Utf8;
-
/**
* Event to record Failed and Killed completion of jobs
*
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
index e0959b08c9d..1f2a1cdf0db 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/MapAttemptFinishedEvent.java
@@ -18,17 +18,14 @@
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.IOException;
-
+import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapred.ProgressSplitsBlock;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapred.ProgressSplitsBlock;
-
-import org.apache.avro.util.Utf8;
/**
* Event to record successful completion of a map attempt
@@ -47,6 +44,7 @@ public class MapAttemptFinishedEvent implements HistoryEvent {
* @param mapFinishTime Finish time of the map phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the map executed
+ * @param rackName Name of the rack where the map executed
* @param state State string for the attempt
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
@@ -59,7 +57,7 @@ public class MapAttemptFinishedEvent implements HistoryEvent {
*/
public MapAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
- long mapFinishTime, long finishTime, String hostname,
+ long mapFinishTime, long finishTime, String hostname, String rackName,
String state, Counters counters,
int[][] allSplits) {
datum.taskid = new Utf8(id.getTaskID().toString());
@@ -69,6 +67,7 @@ public class MapAttemptFinishedEvent implements HistoryEvent {
datum.mapFinishTime = mapFinishTime;
datum.finishTime = finishTime;
datum.hostname = new Utf8(hostname);
+ datum.rackname = new Utf8(rackName);
datum.state = new Utf8(state);
datum.counters = EventWriter.toAvro(counters);
@@ -107,7 +106,8 @@ public class MapAttemptFinishedEvent implements HistoryEvent {
(TaskAttemptID id, TaskType taskType, String taskStatus,
long mapFinishTime, long finishTime, String hostname,
String state, Counters counters) {
- this(id, taskType, taskStatus, mapFinishTime, finishTime, hostname, state, counters, null);
+ this(id, taskType, taskStatus, mapFinishTime, finishTime, hostname, "",
+ state, counters, null);
}
@@ -136,6 +136,8 @@ public class MapAttemptFinishedEvent implements HistoryEvent {
public long getFinishTime() { return datum.finishTime; }
/** Get the host name */
public String getHostname() { return datum.hostname.toString(); }
+ /** Get the rack name */
+ public String getRackname() { return datum.rackname.toString(); }
/** Get the state string */
public String getState() { return datum.state.toString(); }
/** Get the counters */
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java
new file mode 100644
index 00000000000..b8f049c0775
--- /dev/null
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/NormalizedResourceEvent.java
@@ -0,0 +1,74 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapreduce.jobhistory;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapreduce.TaskType;
+
+/**
+ * Event to record the normalized map/reduce requirements.
+ *
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+public class NormalizedResourceEvent implements HistoryEvent {
+ private int memory;
+ private TaskType taskType;
+
+ /**
+ * Normalized request when sent to the Resource Manager.
+ * @param taskType the tasktype of the request.
+ * @param memory the normalized memory requirements.
+ */
+ public NormalizedResourceEvent(TaskType taskType, int memory) {
+ this.memory = memory;
+ this.taskType = taskType;
+ }
+
+ /**
+ * the tasktype for the event.
+ * @return the tasktype for the event.
+ */
+ public TaskType getTaskType() {
+ return this.taskType;
+ }
+
+ /**
+ * the normalized memory
+ * @return the normalized memory
+ */
+ public int getMemory() {
+ return this.memory;
+ }
+
+ @Override
+ public EventType getEventType() {
+ return EventType.NORMALIZED_RESOURCE;
+ }
+
+ @Override
+ public Object getDatum() {
+ throw new UnsupportedOperationException("Not a serializable object");
+ }
+
+ @Override
+ public void setDatum(Object datum) {
+ throw new UnsupportedOperationException("Not a serializable object");
+ }
+}
\ No newline at end of file
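
NormalizedResourceEvent is an in-memory diagnostic event: it reports the memory the ResourceManager normalized a request to, and it deliberately cannot be serialized (getDatum()/setDatum() throw). A short illustrative use, with an arbitrary example value of 1024 MB:

    import org.apache.hadoop.mapreduce.TaskType;
    import org.apache.hadoop.mapreduce.jobhistory.NormalizedResourceEvent;

    public class NormalizedResourceDemo {
      public static void main(String[] args) {
        NormalizedResourceEvent event =
            new NormalizedResourceEvent(TaskType.MAP, 1024); // example value
        System.out.println(event.getEventType() + ": " + event.getTaskType()
            + " request normalized to " + event.getMemory() + " MB");
      }
    }
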
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
index fb20a2edc37..e2b4860f518 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/ReduceAttemptFinishedEvent.java
@@ -18,19 +18,15 @@
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.IOException;
-
+import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.mapred.ProgressSplitsBlock;
import org.apache.hadoop.mapreduce.Counters;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.hadoop.mapred.ProgressSplitsBlock;
-
-import org.apache.avro.util.Utf8;
-
/**
* Event to record successful completion of a reduce attempt
*
@@ -50,6 +46,7 @@ public class ReduceAttemptFinishedEvent implements HistoryEvent {
* @param sortFinishTime Finish time of the sort phase
* @param finishTime Finish time of the attempt
* @param hostname Name of the host where the attempt executed
+ * @param rackName Name of the rack where the attempt executed
* @param state State of the attempt
* @param counters Counters for the attempt
* @param allSplits the "splits", or a pixelated graph of various
@@ -60,7 +57,7 @@ public class ReduceAttemptFinishedEvent implements HistoryEvent {
public ReduceAttemptFinishedEvent
(TaskAttemptID id, TaskType taskType, String taskStatus,
long shuffleFinishTime, long sortFinishTime, long finishTime,
- String hostname, String state, Counters counters,
+ String hostname, String rackName, String state, Counters counters,
int[][] allSplits) {
datum.taskid = new Utf8(id.getTaskID().toString());
datum.attemptId = new Utf8(id.toString());
@@ -70,6 +67,7 @@ public class ReduceAttemptFinishedEvent implements HistoryEvent {
datum.sortFinishTime = sortFinishTime;
datum.finishTime = finishTime;
datum.hostname = new Utf8(hostname);
+ datum.rackname = new Utf8(rackName);
datum.state = new Utf8(state);
datum.counters = EventWriter.toAvro(counters);
@@ -110,7 +108,7 @@ public class ReduceAttemptFinishedEvent implements HistoryEvent {
String hostname, String state, Counters counters) {
this(id, taskType, taskStatus,
shuffleFinishTime, sortFinishTime, finishTime,
- hostname, state, counters, null);
+ hostname, "", state, counters, null);
}
ReduceAttemptFinishedEvent() {}
@@ -140,6 +138,8 @@ public class ReduceAttemptFinishedEvent implements HistoryEvent {
public long getFinishTime() { return datum.finishTime; }
/** Get the name of the host where the attempt ran */
public String getHostname() { return datum.hostname.toString(); }
+ /** Get the rack name of the node where the attempt ran */
+ public String getRackName() { return datum.rackname.toString(); }
/** Get the state string */
public String getState() { return datum.state.toString(); }
/** Get the counters for the attempt */
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java
index 204e6ba9a80..95d28b5c056 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskAttemptStartedEvent.java
@@ -18,13 +18,13 @@
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.IOException;
-
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
+import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.util.ConverterUtils;
import org.apache.avro.util.Utf8;
@@ -44,16 +44,28 @@ public class TaskAttemptStartedEvent implements HistoryEvent {
* @param startTime Start time of the attempt
* @param trackerName Name of the Task Tracker where attempt is running
* @param httpPort The port number of the tracker
+ * @param shufflePort The shuffle port number of the container
+ * @param containerId The containerId for the task attempt.
*/
public TaskAttemptStartedEvent( TaskAttemptID attemptId,
TaskType taskType, long startTime, String trackerName,
- int httpPort) {
+ int httpPort, int shufflePort, ContainerId containerId) {
datum.attemptId = new Utf8(attemptId.toString());
datum.taskid = new Utf8(attemptId.getTaskID().toString());
datum.startTime = startTime;
datum.taskType = new Utf8(taskType.name());
datum.trackerName = new Utf8(trackerName);
datum.httpPort = httpPort;
+ datum.shufflePort = shufflePort;
+ datum.containerId = new Utf8(containerId.toString());
+ }
+
+ // TODO Remove after MrV1 is removed.
+ // Using a dummy containerId to prevent jobHistory parse failures.
+ public TaskAttemptStartedEvent(TaskAttemptID attemptId, TaskType taskType,
+ long startTime, String trackerName, int httpPort, int shufflePort) {
+ this(attemptId, taskType, startTime, trackerName, httpPort, shufflePort,
+ ConverterUtils.toContainerId("container_-1_-1_-1_-1"));
}
TaskAttemptStartedEvent() {}
@@ -75,6 +87,8 @@ public class TaskAttemptStartedEvent implements HistoryEvent {
}
/** Get the HTTP port */
public int getHttpPort() { return datum.httpPort; }
+ /** Get the shuffle port */
+ public int getShufflePort() { return datum.shufflePort; }
/** Get the attempt id */
public TaskAttemptID getTaskAttemptId() {
return TaskAttemptID.forName(datum.attemptId.toString());
@@ -87,5 +101,8 @@ public class TaskAttemptStartedEvent implements HistoryEvent {
? EventType.MAP_ATTEMPT_STARTED
: EventType.REDUCE_ATTEMPT_STARTED;
}
-
+ /** Get the ContainerId */
+ public ContainerId getContainerId() {
+ return ConverterUtils.toContainerId(datum.containerId.toString());
+ }
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java
index 12639975e75..4c2b132b1c3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/jobhistory/TaskStartedEvent.java
@@ -18,15 +18,12 @@
package org.apache.hadoop.mapreduce.jobhistory;
-import java.io.IOException;
-
+import org.apache.avro.util.Utf8;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.TaskID;
import org.apache.hadoop.mapreduce.TaskType;
-import org.apache.avro.util.Utf8;
-
/**
* Event to record the start of a task
*
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
index 26390c7df2a..497ca317fd3 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/FileOutputCommitter.java
@@ -111,32 +111,48 @@ public class FileOutputCommitter extends OutputCommitter {
* @param context the job's context
*/
public void commitJob(JobContext context) throws IOException {
- //delete the task temp directory from the current jobtempdir
- Path tmpDir = new Path(outputPath, getJobAttemptBaseDirName(context) +
- Path.SEPARATOR + FileOutputCommitter.TEMP_DIR_NAME);
- FileSystem fileSys = tmpDir.getFileSystem(context.getConfiguration());
- if (fileSys.exists(tmpDir)) {
- fileSys.delete(tmpDir, true);
- } else {
- LOG.warn("Task temp dir could not be deleted " + tmpDir);
- }
-
- //move the job output to final place
- Path jobOutputPath =
- new Path(outputPath, getJobAttemptBaseDirName(context));
- moveJobOutputs(outputFileSystem, outputPath, jobOutputPath);
-
- // delete the _temporary folder and create a _done file in the o/p folder
- cleanupJob(context);
- if (shouldMarkOutputDir(context.getConfiguration())) {
- markOutputDirSuccessful(context);
+ if (outputPath != null) {
+ //delete the task temp directory from the current jobtempdir
+ Path tmpDir = new Path(outputPath, getJobAttemptBaseDirName(context) +
+ Path.SEPARATOR + FileOutputCommitter.TEMP_DIR_NAME);
+ FileSystem fileSys = tmpDir.getFileSystem(context.getConfiguration());
+ if (fileSys.exists(tmpDir)) {
+ fileSys.delete(tmpDir, true);
+ } else {
+ LOG.warn("Task temp dir could not be deleted " + tmpDir);
+ }
+
+ //move the job output to final place
+ Path jobOutputPath =
+ new Path(outputPath, getJobAttemptBaseDirName(context));
+ moveJobOutputs(outputFileSystem, jobOutputPath, outputPath, jobOutputPath);
+
+ // delete the _temporary folder and create a _done file in the o/p folder
+ cleanupJob(context);
+ if (shouldMarkOutputDir(context.getConfiguration())) {
+ markOutputDirSuccessful(context);
+ }
}
}
- private void moveJobOutputs(FileSystem fs,
+ /**
+ * Move job output to final location
+ * @param fs Filesystem handle
+ * @param origJobOutputPath The original location of the job output
+ * Required to generate the relative path for correct moving of data.
+ * @param finalOutputDir The final output directory to which the job output
+ * needs to be moved
+ * @param jobOutput The current job output directory being moved
+ * @throws IOException
+ */
+ private void moveJobOutputs(FileSystem fs, final Path origJobOutputPath,
Path finalOutputDir, Path jobOutput) throws IOException {
+ LOG.debug("Told to move job output from " + jobOutput
+ + " to " + finalOutputDir +
+ " and orig job output path is " + origJobOutputPath);
if (fs.isFile(jobOutput)) {
- Path finalOutputPath = getFinalPath(finalOutputDir, jobOutput, jobOutput);
+ Path finalOutputPath =
+ getFinalPath(finalOutputDir, jobOutput, origJobOutputPath);
if (!fs.rename(jobOutput, finalOutputPath)) {
if (!fs.delete(finalOutputPath, true)) {
throw new IOException("Failed to delete earlier output of job");
@@ -145,14 +161,18 @@ public class FileOutputCommitter extends OutputCommitter {
throw new IOException("Failed to save output of job");
}
}
- LOG.debug("Moved " + jobOutput + " to " + finalOutputPath);
+ LOG.debug("Moved job output file from " + jobOutput + " to " +
+ finalOutputPath);
} else if (fs.getFileStatus(jobOutput).isDirectory()) {
+ LOG.debug("Job output file " + jobOutput + " is a dir");
FileStatus[] paths = fs.listStatus(jobOutput);
- Path finalOutputPath = getFinalPath(finalOutputDir, jobOutput, jobOutput);
+ Path finalOutputPath =
+ getFinalPath(finalOutputDir, jobOutput, origJobOutputPath);
fs.mkdirs(finalOutputPath);
+ LOG.debug("Creating dirs along job output path " + finalOutputPath);
if (paths != null) {
for (FileStatus path : paths) {
- moveJobOutputs(fs, finalOutputDir, path.getPath());
+ moveJobOutputs(fs, origJobOutputPath, finalOutputDir, path.getPath());
}
}
}
@@ -233,6 +253,8 @@ public class FileOutputCommitter extends OutputCommitter {
throws IOException {
TaskAttemptID attemptId = context.getTaskAttemptID();
context.progress();
+ LOG.debug("Told to move taskoutput from " + taskOutput
+ + " to " + jobOutputDir);
if (fs.isFile(taskOutput)) {
Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput,
workPath);
@@ -248,9 +270,11 @@ public class FileOutputCommitter extends OutputCommitter {
}
LOG.debug("Moved " + taskOutput + " to " + finalOutputPath);
} else if(fs.getFileStatus(taskOutput).isDirectory()) {
+ LOG.debug("Taskoutput " + taskOutput + " is a dir");
FileStatus[] paths = fs.listStatus(taskOutput);
Path finalOutputPath = getFinalPath(jobOutputDir, taskOutput, workPath);
fs.mkdirs(finalOutputPath);
+ LOG.debug("Creating dirs along path " + finalOutputPath);
if (paths != null) {
for (FileStatus path : paths) {
moveTaskOutputs(context, fs, jobOutputDir, path.getPath());
@@ -281,12 +305,17 @@ public class FileOutputCommitter extends OutputCommitter {
* @throws IOException
*/
private Path getFinalPath(Path jobOutputDir, Path taskOutput,
- Path taskOutputPath) throws IOException {
- URI taskOutputUri = taskOutput.toUri();
- URI relativePath = taskOutputPath.toUri().relativize(taskOutputUri);
+ Path taskOutputPath) throws IOException {
+ URI taskOutputUri = taskOutput.makeQualified(outputFileSystem.getUri(),
+ outputFileSystem.getWorkingDirectory()).toUri();
+ URI taskOutputPathUri =
+ taskOutputPath.makeQualified(
+ outputFileSystem.getUri(),
+ outputFileSystem.getWorkingDirectory()).toUri();
+ URI relativePath = taskOutputPathUri.relativize(taskOutputUri);
if (taskOutputUri == relativePath) {
throw new IOException("Can not get the relative path: base = " +
- taskOutputPath + " child = " + taskOutput);
+ taskOutputPathUri + " child = " + taskOutputUri);
}
if (relativePath.getPath().length() > 0) {
return new Path(jobOutputDir, relativePath.getPath());
@@ -334,9 +363,12 @@ public class FileOutputCommitter extends OutputCommitter {
Path pathToRecover =
new Path(outputPath, getJobAttemptBaseDirName(previousAttempt));
+ LOG.debug("Trying to recover task from " + pathToRecover
+ + " into " + jobOutputPath);
if (outputFileSystem.exists(pathToRecover)) {
// Move the task outputs to their final place
- moveJobOutputs(outputFileSystem, jobOutputPath, pathToRecover);
+ moveJobOutputs(outputFileSystem,
+ pathToRecover, jobOutputPath, pathToRecover);
LOG.info("Saved output of job to " + jobOutputPath);
}
}
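
The getFinalPath() change qualifies both URIs against the output filesystem before calling relativize(), because java.net.URI.relativize() returns its argument unchanged whenever the scheme, authority, or path prefix do not line up, which is exactly the "Can not get the relative path" failure the committer guards against. A pure-JDK illustration of the two cases (not Hadoop code; the URIs are placeholders):

    import java.net.URI;

    public class RelativizeDemo {
      public static void main(String[] args) {
        URI base = URI.create("hdfs://nn:8020/out/_temporary/0");
        URI child = URI.create("hdfs://nn:8020/out/_temporary/0/part-00000");
        // Same scheme/authority and matching prefix: prints "part-00000".
        System.out.println(base.relativize(child));

        URI schemelessBase = URI.create("/out/_temporary/0");
        // Mixed forms: relativize() hands back the child untouched, so the
        // caller cannot compute a relative path.
        System.out.println(schemelessBase.relativize(child));
      }
    }
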
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
index 19339d34346..32f44f24d87 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/output/NullOutputFormat.java
@@ -18,6 +18,8 @@
package org.apache.hadoop.mapreduce.lib.output;
+import java.io.IOException;
+
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.mapreduce.JobContext;
@@ -56,6 +58,17 @@ public class NullOutputFormat<K, V> extends OutputFormat<K, V> {
}
public void setupJob(JobContext jobContext) { }
public void setupTask(TaskAttemptContext taskContext) { }
+
+ @Override
+ public boolean isRecoverySupported() {
+ return true;
+ }
+
+ @Override
+ public void recoverTask(TaskAttemptContext taskContext)
+ throws IOException {
+ // Nothing to do for recovering the task.
+ }
};
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
index 72a194ab0bc..ad58807e1b0 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/protocol/ClientProtocol.java
@@ -39,6 +39,7 @@ import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.mapreduce.server.jobtracker.JTConfig;
+import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.authorize.AccessControlList;
@@ -115,6 +116,8 @@ public interface ClientProtocol extends VersionedProtocol {
* MAPREDUCE-2337.
* Version 37: More efficient serialization format for framework counters
* (MAPREDUCE-901)
+ * Version 38: Added getLogFileParams(JobID, TaskAttemptID) as part of
+ * MAPREDUCE-3146
*/
public static final long versionID = 37L;
@@ -351,4 +354,16 @@ public interface ClientProtocol extends VersionedProtocol {
public void cancelDelegationToken(Token<DelegationTokenIdentifier> token
) throws IOException,
InterruptedException;
+
+ /**
+ * Gets the location of the log file for a job if no taskAttemptId is
+ * specified, otherwise gets the log location for the taskAttemptId.
+ * @param jobID the jobId.
+ * @param taskAttemptID the taskAttemptId.
+ * @return log params.
+ * @throws IOException
+ * @throws InterruptedException
+ */
+ public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
+ throws IOException, InterruptedException;
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
index 9e96b55ccbd..e4675b523a5 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/DelegationTokenRenewal.java
@@ -19,8 +19,6 @@
package org.apache.hadoop.mapreduce.security.token;
import java.io.IOException;
-import java.net.InetAddress;
-import java.net.URI;
import java.security.PrivilegedExceptionAction;
import java.util.Collection;
import java.util.Collections;
@@ -37,18 +35,10 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.tools.DelegationTokenFetcher;
-import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.JobID;
-import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.util.StringUtils;
@@ -64,14 +54,14 @@ public class DelegationTokenRenewal {
*
*/
private static class DelegationTokenToRenew {
- public final Token<DelegationTokenIdentifier> token;
+ public final Token<?> token;
public final JobID jobId;
public final Configuration conf;
public long expirationDate;
public TimerTask timerTask;
public DelegationTokenToRenew(
- JobID jId, Token<DelegationTokenIdentifier> t,
+ JobID jId, Token<?> t,
Configuration newConf, long newExpirationDate) {
token = t;
jobId = jId;
@@ -124,10 +114,9 @@ public class DelegationTokenRenewal {
private static class DelegationTokenCancelThread extends Thread {
private static class TokenWithConf {
- Token<DelegationTokenIdentifier> token;
+ Token<?> token;
Configuration conf;
- TokenWithConf(Token<DelegationTokenIdentifier> token,
- Configuration conf) {
+ TokenWithConf(Token<?> token, Configuration conf) {
this.token = token;
this.conf = conf;
}
@@ -139,7 +128,7 @@ public class DelegationTokenRenewal {
super("Delegation Token Canceler");
setDaemon(true);
}
- public void cancelToken(Token<DelegationTokenIdentifier> token,
+ public void cancelToken(Token<?> token,
Configuration conf) {
TokenWithConf tokenWithConf = new TokenWithConf(token, conf);
while (!queue.offer(tokenWithConf)) {
@@ -158,25 +147,21 @@ public class DelegationTokenRenewal {
TokenWithConf tokenWithConf = null;
try {
tokenWithConf = queue.take();
- DistributedFileSystem dfs = null;
- try {
- // do it over rpc. For that we need DFS object
- dfs = getDFSForToken(tokenWithConf.token, tokenWithConf.conf);
- } catch (Exception e) {
- LOG.info("couldn't get DFS to cancel. Will retry over HTTPS");
- dfs = null;
- }
-
- if(dfs != null) {
- dfs.cancelDelegationToken(tokenWithConf.token);
- } else {
- cancelDelegationTokenOverHttps(tokenWithConf.token,
- tokenWithConf.conf);
- }
+ final TokenWithConf current = tokenWithConf;
+
if (LOG.isDebugEnabled()) {
- LOG.debug("Canceling token " + tokenWithConf.token.getService() +
- " for dfs=" + dfs);
+ LOG.debug("Canceling token " + tokenWithConf.token.getService());
}
+ // need to use doAs so that http can find the kerberos tgt
+ UserGroupInformation.getLoginUser().doAs(
+ new PrivilegedExceptionAction<Void>() {
+
+ @Override
+ public Void run() throws Exception {
+ current.token.cancel(current.conf);
+ return null;
+ }
+ });
} catch (IOException e) {
LOG.warn("Failed to cancel token " + tokenWithConf.token + " " +
StringUtils.stringifyException(e));
@@ -195,119 +180,29 @@ public class DelegationTokenRenewal {
delegationTokens.add(t);
}
- // kind of tokens we currently renew
- private static final Text kindHdfs =
- DelegationTokenIdentifier.HDFS_DELEGATION_KIND;
-
- @SuppressWarnings("unchecked")
public static synchronized void registerDelegationTokensForRenewal(
- JobID jobId, Credentials ts, Configuration conf) {
+ JobID jobId, Credentials ts, Configuration conf) throws IOException {
if(ts==null)
return; //nothing to add
- Collection<Token<? extends TokenIdentifier>> tokens = ts.getAllTokens();
+ Collection<Token<?>> tokens = ts.getAllTokens();
long now = System.currentTimeMillis();
-
- for(Token<? extends TokenIdentifier> t : tokens) {
- // currently we only check for HDFS delegation tokens
- // later we can add more different types.
- if(! t.getKind().equals(kindHdfs)) {
- continue;
- }
- Token<DelegationTokenIdentifier> dt =
- (Token<DelegationTokenIdentifier>)t;
-
+
+ for (Token<?> t : tokens) {
// first renew happens immediately
- DelegationTokenToRenew dtr =
- new DelegationTokenToRenew(jobId, dt, conf, now);
+ if (t.isManaged()) {
+ DelegationTokenToRenew dtr = new DelegationTokenToRenew(jobId, t, conf,
+ now);
- addTokenToList(dtr);
-
- setTimerForTokenRenewal(dtr, true);
- LOG.info("registering token for renewal for service =" + dt.getService()+
- " and jobID = " + jobId);
- }
- }
-
- private static String getHttpAddressForToken(
- Token<DelegationTokenIdentifier> token, final Configuration conf)
- throws IOException {
+ addTokenToList(dtr);
- String[] ipaddr = token.getService().toString().split(":");
-
- InetAddress iaddr = InetAddress.getByName(ipaddr[0]);
- String dnsName = iaddr.getCanonicalHostName();
-
- // in case it is a different cluster it may have a different port
- String httpsPort = conf.get("dfs.hftp.https.port");
- if(httpsPort == null) {
- // get from this cluster
- httpsPort = conf.get(DFSConfigKeys.DFS_HTTPS_PORT_KEY,
- "" + DFSConfigKeys.DFS_HTTPS_PORT_DEFAULT);
- }
-
- // always use https (it is for security only)
- return "https://" + dnsName+":"+httpsPort;
- }
-
- protected static long renewDelegationTokenOverHttps(
- final Token<DelegationTokenIdentifier> token, final Configuration conf)
- throws InterruptedException, IOException{
- final String httpAddress = getHttpAddressForToken(token, conf);
- // will be chaged to debug
- LOG.info("address to renew=" + httpAddress + "; tok=" + token.getService());
- Long expDate = (Long) UserGroupInformation.getLoginUser().doAs(
- new PrivilegedExceptionAction<Long>() {
- public Long run() throws IOException {
- return DelegationTokenFetcher.renewDelegationToken(httpAddress, token);
- }
- });
- LOG.info("Renew over HTTP done. addr="+httpAddress+";res="+expDate);
- return expDate;
- }
-
- private static long renewDelegationToken(DelegationTokenToRenew dttr)
- throws Exception {
- long newExpirationDate=System.currentTimeMillis()+3600*1000;
- Token<DelegationTokenIdentifier> token = dttr.token;
- Configuration conf = dttr.conf;
- if(token.getKind().equals(kindHdfs)) {
- DistributedFileSystem dfs=null;
-
- try {
- // do it over rpc. For that we need DFS object
- dfs = getDFSForToken(token, conf);
- } catch (IOException e) {
- LOG.info("couldn't get DFS to renew. Will retry over HTTPS");
- dfs = null;
+ setTimerForTokenRenewal(dtr, true);
+ LOG.info("registering token for renewal for service =" + t.getService()
+ + " and jobID = " + jobId);
}
-
- try {
- if(dfs != null)
- newExpirationDate = dfs.renewDelegationToken(token);
- else {
- // try HTTP
- newExpirationDate = renewDelegationTokenOverHttps(token, conf);
- }
- } catch (InvalidToken ite) {
- LOG.warn("invalid token - not scheduling for renew");
- removeFailedDelegationToken(dttr);
- throw new IOException("failed to renew token", ite);
- } catch (AccessControlException ioe) {
- LOG.warn("failed to renew token:"+token, ioe);
- removeFailedDelegationToken(dttr);
- throw new IOException("failed to renew token", ioe);
- } catch (Exception e) {
- LOG.warn("failed to renew token:"+token, e);
- // returns default expiration date
- }
- } else {
- throw new Exception("unknown token type to renew:"+token.getKind());
}
- return newExpirationDate;
}
-
-
+
/**
* Task - to renew a token
*
@@ -319,41 +214,29 @@ public class DelegationTokenRenewal {
@Override
public void run() {
- Token<DelegationTokenIdentifier> token = dttr.token;
+ Token<?> token = dttr.token;
long newExpirationDate=0;
try {
- newExpirationDate = renewDelegationToken(dttr);
- } catch (Exception e) {
- return; // message logged in renewDT method
- }
- if (LOG.isDebugEnabled())
- LOG.debug("renewing for:"+token.getService()+";newED=" +
- newExpirationDate);
-
- // new expiration date
- dttr.expirationDate = newExpirationDate;
- setTimerForTokenRenewal(dttr, false);// set the next one
- }
- }
-
- private static DistributedFileSystem getDFSForToken(
- Token<DelegationTokenIdentifier> token, final Configuration conf)
- throws Exception {
- DistributedFileSystem dfs = null;
- try {
- final URI uri = new URI (SCHEME + "://" + token.getService().toString());
- dfs =
- UserGroupInformation.getLoginUser().doAs(
- new PrivilegedExceptionAction<DistributedFileSystem>() {
- public DistributedFileSystem run() throws IOException {
- return (DistributedFileSystem) FileSystem.get(uri, conf);
+ // need to use doAs so that http can find the kerberos tgt
+ dttr.expirationDate = UserGroupInformation.getLoginUser().doAs(
+ new PrivilegedExceptionAction<Long>() {
+
+ @Override
+ public Long run() throws Exception {
+ return dttr.token.renew(dttr.conf);
+ }
+ });
+
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("renewing for:" + token.getService() + ";newED="
+ + dttr.expirationDate);
}
- });
- } catch (Exception e) {
- LOG.warn("Failed to create a dfs to renew/cancel for:" + token.getService(), e);
- throw e;
- }
- return dfs;
+ setTimerForTokenRenewal(dttr, false);// set the next one
+ } catch (Exception e) {
+ LOG.error("Exception renewing token" + token + ". Not rescheduled", e);
+ removeFailedDelegationToken(dttr);
+ }
+ }
}
/**
@@ -372,15 +255,11 @@ public class DelegationTokenRenewal {
renewIn = now + expiresIn - expiresIn/10; // little before expiration
}
- try {
- // need to create new timer every time
- TimerTask tTask = new RenewalTimerTask(token);
- token.setTimerTask(tTask); // keep reference to the timer
+ // need to create new timer every time
+ TimerTask tTask = new RenewalTimerTask(token);
+ token.setTimerTask(tTask); // keep reference to the timer
- renewalTimer.schedule(token.timerTask, new Date(renewIn));
- } catch (Exception e) {
- LOG.warn("failed to schedule a task, token will not renew more", e);
- }
+ renewalTimer.schedule(token.timerTask, new Date(renewIn));
}
/**
@@ -391,33 +270,9 @@ public class DelegationTokenRenewal {
delegationTokens.clear();
}
-
- protected static void cancelDelegationTokenOverHttps(
- final Token<DelegationTokenIdentifier> token, final Configuration conf)
- throws InterruptedException, IOException{
- final String httpAddress = getHttpAddressForToken(token, conf);
- // will be chaged to debug
- LOG.info("address to cancel=" + httpAddress + "; tok=" + token.getService());
-
- UserGroupInformation.getLoginUser().doAs(
- new PrivilegedExceptionAction<Void>() {
- public Void run() throws IOException {
- DelegationTokenFetcher.cancelDelegationToken(httpAddress, token);
- return null;
- }
- });
- LOG.info("Cancel over HTTP done. addr="+httpAddress);
- }
-
-
// cancel a token
private static void cancelToken(DelegationTokenToRenew t) {
- Token<DelegationTokenIdentifier> token = t.token;
- Configuration conf = t.conf;
-
- if(token.getKind().equals(kindHdfs)) {
- dtCancelThread.cancelToken(token, conf);
- }
+ dtCancelThread.cancelToken(t.token, t.conf);
}
/**
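
Note on the DelegationTokenRenewal change above: instead of renewing and cancelling HDFS delegation tokens through a DistributedFileSystem handle (with an HTTPS fallback via DelegationTokenFetcher), the service now calls the kind-agnostic Token.renew(conf) and Token.cancel(conf) under the login user's UGI, so the Kerberos TGT is visible to whichever TokenRenewer handles the token's kind. A minimal sketch of that calling pattern follows; ExampleTokenRenewal is a hypothetical illustration, not part of the patch.

  // Editorial sketch, not part of the patch. Only Token, Configuration and
  // UserGroupInformation are real Hadoop APIs; the class name is invented.
  import java.security.PrivilegedExceptionAction;

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.hadoop.security.token.Token;

  public class ExampleTokenRenewal {
    /** Renew any managed token as the login user; dispatch is by token kind. */
    static long renew(final Token<?> token, final Configuration conf)
        throws Exception {
      return UserGroupInformation.getLoginUser().doAs(
          new PrivilegedExceptionAction<Long>() {
            @Override
            public Long run() throws Exception {
              return token.renew(conf); // same call the patched timer task makes
            }
          });
    }
  }
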
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
index 17a65415936..20c74f1e416 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/JobTokenIdentifier.java
@@ -25,6 +25,7 @@ import java.io.IOException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.security.UserGroupInformation;
@@ -35,7 +36,7 @@ import org.apache.hadoop.security.UserGroupInformation;
@InterfaceStability.Unstable
public class JobTokenIdentifier extends TokenIdentifier {
private Text jobid;
- final static Text KIND_NAME = new Text("mapreduce.job");
+ public final static Text KIND_NAME = new Text("mapreduce.job");
/**
* Default constructor
@@ -86,4 +87,12 @@ public class JobTokenIdentifier extends TokenIdentifier {
public void write(DataOutput out) throws IOException {
jobid.write(out);
}
+
+ @InterfaceAudience.Private
+ public static class Renewer extends Token.TrivialRenewer {
+ @Override
+ protected Text getKind() {
+ return KIND_NAME;
+ }
+ }
}
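
The new JobTokenIdentifier.Renewer above extends Token.TrivialRenewer, the base class for token kinds that are matched by kind but not actually scheduled for renewal (the registration loop above only schedules tokens where isManaged() is true); a subclass only needs to supply getKind(). A hedged sketch of how another non-renewable kind would plug into the same lookup; the class and kind name are hypothetical, and the ServiceLoader registration path is the usual Hadoop convention rather than something this patch adds.

  // Editorial sketch, not part of the patch; names are illustrative.
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.security.token.Token;

  public class ExampleTrivialRenewer extends Token.TrivialRenewer {
    private static final Text EXAMPLE_KIND = new Text("example.token"); // hypothetical kind

    @Override
    protected Text getKind() {
      return EXAMPLE_KIND; // the base class matches tokens of this kind only
    }
  }
  // Assumed registration: list the class name in
  //   META-INF/services/org.apache.hadoop.security.token.TokenRenewer
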
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
index a1d736abf1e..77e5817c916 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/security/token/delegation/DelegationTokenIdentifier.java
@@ -30,7 +30,7 @@ import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdenti
@InterfaceStability.Unstable
public class DelegationTokenIdentifier
extends AbstractDelegationTokenIdentifier {
- static final Text MAPREDUCE_DELEGATION_KIND =
+ public static final Text MAPREDUCE_DELEGATION_KIND =
new Text("MAPREDUCE_DELEGATION_TOKEN");
/**
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
index 9f588d55ac5..7ad08e956d9 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/ReduceContextImpl.java
@@ -176,11 +176,15 @@ public class ReduceContextImpl<KEYIN,VALUEIN,KEYOUT,VALUEOUT>
return value;
}
+ BackupStore<KEYIN,VALUEIN> getBackupStore() {
+ return backupStore;
+ }
+
protected class ValueIterator implements ReduceContext.ValueIterator<VALUEIN> {
private boolean inReset = false;
private boolean clearMarkFlag = false;
-
+
@Override
public boolean hasNext() {
try {
@@ -247,7 +251,7 @@ public class ReduceContextImpl
@Override
public void mark() throws IOException {
- if (backupStore == null) {
+ if (getBackupStore() == null) {
backupStore = new BackupStore<KEYIN,VALUEIN>(conf, taskid);
}
isMarked = true;
@@ -290,7 +294,7 @@ public class ReduceContextImpl
@Override
public void clearMark() throws IOException {
- if (backupStore == null) {
+ if (getBackupStore() == null) {
return;
}
if (inReset) {
@@ -308,7 +312,7 @@ public class ReduceContextImpl
* @throws IOException
*/
public void resetBackupStore() throws IOException {
- if (backupStore == null) {
+ if (getBackupStore() == null) {
return;
}
inReset = isMarked = false;
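
The ReduceContextImpl change above routes the backupStore null checks through the new package-private getBackupStore() accessor rather than reading the field directly, presumably so a subclass or test double can substitute its own BackupStore. A small illustration of that accessor-as-seam shape, with hypothetical names and a plain StringBuilder standing in for BackupStore:

  // Editorial sketch, not part of the patch; names are illustrative.
  class BufferingIterator {
    private StringBuilder backup; // stand-in for the lazily created BackupStore

    StringBuilder getBackup() {   // the seam: a subclass or test can override this
      return backup;
    }

    void mark(String value) {
      if (getBackup() == null) {  // check through the accessor, not the field
        backup = new StringBuilder();
      }
      getBackup().append(value);
    }
  }
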
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
index 8f0dad75d55..6facb47aa21 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/EventFetcher.java
@@ -60,7 +60,7 @@ class EventFetcher<K,V> extends Thread {
LOG.info(reduce + " Thread started: " + getName());
try {
- while (true) {
+ while (true && !Thread.currentThread().isInterrupted()) {
try {
int numNewMaps = getMapCompletionEvents();
failures = 0;
@@ -68,7 +68,9 @@ class EventFetcher extends Thread {
LOG.info(reduce + ": " + "Got " + numNewMaps + " new map-outputs");
}
LOG.debug("GetMapEventsThread about to sleep for " + SLEEP_TIME);
- Thread.sleep(SLEEP_TIME);
+ if (!Thread.currentThread().isInterrupted()) {
+ Thread.sleep(SLEEP_TIME);
+ }
} catch (IOException ie) {
LOG.info("Exception in getting events", ie);
// check to see whether to abort
@@ -76,7 +78,9 @@ class EventFetcher extends Thread {
throw new IOException("too many failures downloading events", ie);
}
// sleep for a bit
- Thread.sleep(RETRY_PERIOD);
+ if (!Thread.currentThread().isInterrupted()) {
+ Thread.sleep(RETRY_PERIOD);
+ }
}
}
} catch (InterruptedException e) {
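
The EventFetcher change above (and the Fetcher change that follows) makes the polling loops check Thread.currentThread().isInterrupted() before iterating and before sleeping, so an interrupt delivered at shutdown ends the thread promptly instead of being lost between sleeps. The generic shape of that pattern, as an illustration only (PollingWorker and its fields are hypothetical):

  // Editorial sketch, not part of the patch; names are illustrative.
  public class PollingWorker extends Thread {
    private static final long SLEEP_TIME = 1000; // illustrative 1s poll interval

    @Override
    public void run() {
      try {
        while (!Thread.currentThread().isInterrupted()) {
          doOneRound();
          if (!Thread.currentThread().isInterrupted()) {
            Thread.sleep(SLEEP_TIME); // throws InterruptedException if interrupted
          }
        }
      } catch (InterruptedException e) {
        // interrupted while sleeping: fall through and let the thread exit
      }
    }

    private void doOneRound() {
      // poll for new events / map outputs here
    }
  }
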
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
index 994251addb4..5a213f05c1a 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/task/reduce/Fetcher.java
@@ -135,7 +135,7 @@ class Fetcher<K,V> extends Thread {
public void run() {
try {
- while (true) {
+ while (true && !Thread.currentThread().isInterrupted()) {
MapHost host = null;
try {
// If merge is on, block
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
index 464161bb938..7b255289b02 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/tools/CLI.java
@@ -42,9 +42,11 @@ import org.apache.hadoop.mapreduce.TaskReport;
import org.apache.hadoop.mapreduce.TaskTrackerInfo;
import org.apache.hadoop.mapreduce.TaskType;
import org.apache.hadoop.mapreduce.jobhistory.HistoryViewer;
+import org.apache.hadoop.mapreduce.v2.LogParams;
import org.apache.hadoop.security.AccessControlException;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
+import org.apache.hadoop.yarn.server.nodemanager.containermanager.logaggregation.LogDumper;
/**
* Interprets the map reduce cli options
@@ -53,6 +55,7 @@ import org.apache.hadoop.util.ToolRunner;
@InterfaceStability.Stable
public class CLI extends Configured implements Tool {
private static final Log LOG = LogFactory.getLog(CLI.class);
+ private Cluster cluster;
public CLI() {
}
@@ -94,6 +97,7 @@ public class CLI extends Configured implements Tool {
boolean killTask = false;
boolean failTask = false;
boolean setJobPriority = false;
+ boolean logs = false;
if ("-submit".equals(cmd)) {
if (argv.length != 2) {
@@ -204,13 +208,26 @@ public class CLI extends Configured implements Tool {
taskType = argv[2];
taskState = argv[3];
displayTasks = true;
+ } else if ("-logs".equals(cmd)) {
+ if (argv.length == 2 || argv.length ==3) {
+ logs = true;
+ jobid = argv[1];
+ if (argv.length == 3) {
+ taskid = argv[2];
+ } else {
+ taskid = null;
+ }
+ } else {
+ displayUsage(cmd);
+ return exitCode;
+ }
} else {
displayUsage(cmd);
return exitCode;
}
// initialize cluster
- Cluster cluster = new Cluster(getConf());
+ cluster = new Cluster(getConf());
// Submit the request
try {
@@ -312,6 +329,22 @@ public class CLI extends Configured implements Tool {
System.out.println("Could not fail task " + taskid);
exitCode = -1;
}
+ } else if (logs) {
+ try {
+ JobID jobID = JobID.forName(jobid);
+ TaskAttemptID taskAttemptID = TaskAttemptID.forName(taskid);
+ LogParams logParams = cluster.getLogParams(jobID, taskAttemptID);
+ LogDumper logDumper = new LogDumper();
+ logDumper.setConf(getConf());
+ logDumper.dumpAContainersLogs(logParams.getApplicationId(),
+ logParams.getContainerId(), logParams.getNodeId(),
+ logParams.getOwner());
+ } catch (IOException e) {
+ if (e instanceof RemoteException) {
+ throw e;
+ }
+ System.out.println(e.getMessage());
+ }
}
} catch (RemoteException re) {
IOException unwrappedException = re.unwrapRemoteException();
@@ -379,6 +412,10 @@
" <job-id> <task-type> <task-state>]. " +
"Valid values for <task-type> are " + taskTypes + ". " +
"Valid values for <task-state> are " + taskStates);
+ } else if ("-logs".equals(cmd)) {
+ System.err.println(prefix + "[" + cmd +
+ "