Merge trunk into HDFS-1623 branch.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1623@1158072 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 2011-08-16 00:37:15 +00:00
commit c65a01ea12
239 changed files with 10210 additions and 4310 deletions

View File

@ -53,16 +53,22 @@ if ! grep -qv '^a/\|^b/' $TMP ; then
sed -i -e 's,^[ab]/,,' $TMP
fi
# if all of the lines start with common/, hdfs/, or mapreduce/, this is
PREFIX_DIRS=$(cut -d '/' -f 1 $TMP | sort | uniq)
# if we are at the project root then nothing more to do
if [[ -d hadoop-common ]]; then
echo Looks like this is being run at project root
# if all of the lines start with hadoop-common/, hdfs/, or mapreduce/, this is
# relative to the hadoop root instead of the subproject root, so we need
# to chop off another layer
PREFIX_DIRS=$(cut -d '/' -f 1 $TMP | sort | uniq)
if [[ "$PREFIX_DIRS" =~ ^(hdfs|common|mapreduce)$ ]]; then
elif [[ "$PREFIX_DIRS" =~ ^(hdfs|hadoop-common|mapreduce)$ ]]; then
echo Looks like this is relative to project root. Increasing PLEVEL
PLEVEL=$[$PLEVEL + 1]
elif ! echo "$PREFIX_DIRS" | grep -vxq 'common\|hdfs\|mapreduce' ; then
echo Looks like this is a cross-subproject patch. Not supported!
elif ! echo "$PREFIX_DIRS" | grep -vxq 'hadoop-common\|hdfs\|mapreduce' ; then
echo Looks like this is a cross-subproject patch. Try applying from the project root
exit 1
fi

View File

@ -13,6 +13,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# The number of acceptable warnings for *all* modules
# Please update the per-module test-patch.properties if you update this file.
OK_RELEASEAUDIT_WARNINGS=0
OK_FINDBUGS_WARNINGS=0
OK_JAVADOC_WARNINGS=0
OK_JAVADOC_WARNINGS=6

View File

@ -19,84 +19,152 @@ ulimit -n 1024
### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
### Read variables from properties file
bindir=$(dirname $0)
. $bindir/test-patch.properties
# Defaults
if [ -z "$MAVEN_HOME" ]; then
MVN=mvn
else
MVN=$MAVEN_HOME/bin/mvn
fi
PROJECT_NAME=Hadoop
JENKINS=false
PATCH_DIR=/tmp
SUPPORT_DIR=/tmp
BASEDIR=$(pwd)
PS=${PS:-ps}
AWK=${AWK:-awk}
WGET=${WGET:-wget}
SVN=${SVN:-svn}
GREP=${GREP:-grep}
PATCH=${PATCH:-patch}
JIRACLI=${JIRA:-jira}
FINDBUGS_HOME=${FINDBUGS_HOME}
FORREST_HOME=${FORREST_HOME}
ECLIPSE_HOME=${ECLIPSE_HOME}
###############################################################################
printUsage() {
echo "Usage: $0 [options] patch-file | defect-number"
echo
echo "Where:"
echo " patch-file is a local patch file containing the changes to test"
echo " defect-number is a JIRA defect number (e.g. 'HADOOP-1234') to test (Jenkins only)"
echo
echo "Options:"
echo "--patch-dir=<dir> The directory for working and output files (default '/tmp')"
echo "--basedir=<dir> The directory to apply the patch to (default current directory)"
echo "--mvn-cmd=<cmd> The 'mvn' command to use (default \$MAVEN_HOME/bin/mvn, or 'mvn')"
echo "--ps-cmd=<cmd> The 'ps' command to use (default 'ps')"
echo "--awk-cmd=<cmd> The 'awk' command to use (default 'awk')"
echo "--svn-cmd=<cmd> The 'svn' command to use (default 'svn')"
echo "--grep-cmd=<cmd> The 'grep' command to use (default 'grep')"
echo "--patch-cmd=<cmd> The 'patch' command to use (default 'patch')"
echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
echo "--forrest-home=<path> Forrest home directory (default FORREST_HOME environment variable)"
echo "--dirty-workspace Allow the local SVN workspace to have uncommitted changes"
echo
echo "Jenkins-only options:"
echo "--jenkins Run by Jenkins (runs tests and posts results to JIRA)"
echo "--support-dir=<dir> The directory to find support files in"
echo "--wget-cmd=<cmd> The 'wget' command to use (default 'wget')"
echo "--jira-cmd=<cmd> The 'jira' command to use (default 'jira')"
echo "--jira-password=<pw> The password for the 'jira' command"
echo "--eclipse-home=<path> Eclipse home directory (default ECLIPSE_HOME environment variable)"
}
###############################################################################
parseArgs() {
case "$1" in
HUDSON)
### Set HUDSON to true to indicate that this script is being run by Hudson
HUDSON=true
if [[ $# != 16 ]] ; then
echo "ERROR: usage $0 HUDSON <PATCH_DIR> <SUPPORT_DIR> <PS_CMD> <WGET_CMD> <JIRACLI> <SVN_CMD> <GREP_CMD> <PATCH_CMD> <FINDBUGS_HOME> <FORREST_HOME> <ECLIPSE_HOME> <WORKSPACE_BASEDIR> <JIRA_PASSWD> <CURL_CMD> <DEFECT> "
cleanupAndExit 0
fi
PATCH_DIR=$2
SUPPORT_DIR=$3
PS=$4
WGET=$5
JIRACLI=$6
SVN=$7
GREP=$8
PATCH=$9
FINDBUGS_HOME=${10}
FORREST_HOME=${11}
ECLIPSE_HOME=${12}
BASEDIR=${13}
JIRA_PASSWD=${14}
CURL=${15}
defect=${16}
### Retrieve the defect number
if [ -z "$defect" ] ; then
echo "Could not determine the patch to test. Exiting."
cleanupAndExit 0
fi
if [ ! -e "$PATCH_DIR" ] ; then
mkdir -p $PATCH_DIR
fi
ECLIPSE_PROPERTY="-Declipse.home=$ECLIPSE_HOME"
for i in $*
do
case $i in
--jenkins)
JENKINS=true
;;
DEVELOPER)
### Set HUDSON to false to indicate that this script is being run by a developer
HUDSON=false
if [[ $# != 9 ]] ; then
echo "ERROR: usage $0 DEVELOPER <PATCH_FILE> <SCRATCH_DIR> <SVN_CMD> <GREP_CMD> <PATCH_CMD> <FINDBUGS_HOME> <FORREST_HOME> <WORKSPACE_BASEDIR>"
cleanupAndExit 0
fi
### PATCH_FILE contains the location of the patchfile
PATCH_FILE=$2
if [[ ! -e "$PATCH_FILE" ]] ; then
echo "Unable to locate the patch file $PATCH_FILE"
cleanupAndExit 0
fi
PATCH_DIR=$3
### Check if $PATCH_DIR exists. If it does not exist, create a new directory
if [[ ! -e "$PATCH_DIR" ]] ; then
mkdir "$PATCH_DIR"
if [[ $? == 0 ]] ; then
echo "$PATCH_DIR has been created"
else
echo "Unable to create $PATCH_DIR"
cleanupAndExit 0
fi
fi
SVN=$4
GREP=$5
PATCH=$6
FINDBUGS_HOME=$7
FORREST_HOME=$8
BASEDIR=$9
### Obtain the patch filename to append it to the version number
defect=`basename $PATCH_FILE`
--patch-dir=*)
PATCH_DIR=${i#*=}
;;
--support-dir=*)
SUPPORT_DIR=${i#*=}
;;
--basedir=*)
BASEDIR=${i#*=}
;;
--mvn-cmd=*)
MVN=${i#*=}
;;
--ps-cmd=*)
PS=${i#*=}
;;
--awk-cmd=*)
AWK=${i#*=}
;;
--wget-cmd=*)
WGET=${i#*=}
;;
--svn-cmd=*)
SVN=${i#*=}
;;
--grep-cmd=*)
GREP=${i#*=}
;;
--patch-cmd=*)
PATCH=${i#*=}
;;
--jira-cmd=*)
JIRACLI=${i#*=}
;;
--jira-password=*)
JIRA_PASSWD=${i#*=}
;;
--findbugs-home=*)
FINDBUGS_HOME=${i#*=}
;;
--forrest-home=*)
FORREST_HOME=${i#*=}
;;
--eclipse-home=*)
ECLIPSE_HOME=${i#*=}
;;
--dirty-workspace)
DIRTY_WORKSPACE=true
;;
*)
echo "ERROR: usage $0 HUDSON [args] | DEVELOPER [args]"
cleanupAndExit 0
PATCH_OR_DEFECT=$i
;;
esac
esac
done
if [ -z "$PATCH_OR_DEFECT" ]; then
printUsage
exit 1
fi
if [[ $JENKINS == "true" ]] ; then
echo "Running in Jenkins mode"
defect=$PATCH_OR_DEFECT
ECLIPSE_PROPERTY="-Declipse.home=$ECLIPSE_HOME"
else
echo "Running in developer mode"
JENKINS=false
### PATCH_FILE contains the location of the patchfile
PATCH_FILE=$PATCH_OR_DEFECT
if [[ ! -e "$PATCH_FILE" ]] ; then
echo "Unable to locate the patch file $PATCH_FILE"
cleanupAndExit 0
fi
### Check if $PATCH_DIR exists. If it does not exist, create a new directory
if [[ ! -e "$PATCH_DIR" ]] ; then
mkdir "$PATCH_DIR"
if [[ $? == 0 ]] ; then
echo "$PATCH_DIR has been created"
else
echo "Unable to create $PATCH_DIR"
cleanupAndExit 0
fi
fi
### Obtain the patch filename to append it to the version number
defect=`basename $PATCH_FILE`
fi
}
###############################################################################
@ -111,9 +179,10 @@ checkout () {
echo ""
echo ""
### When run by a developer, if the workspace contains modifications, do not continue
### unless the --dirty-workspace option was set
status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'`
if [[ $HUDSON == "false" ]] ; then
if [[ "$status" != "" ]] ; then
if [[ $JENKINS == "false" ]] ; then
if [[ "$status" != "" && -z $DIRTY_WORKSPACE ]] ; then
echo "ERROR: can't run in a workspace that contains the following modifications"
echo "$status"
cleanupAndExit 1
@ -131,7 +200,7 @@ checkout () {
###############################################################################
setup () {
### Download latest patch file (ignoring .htm and .html) when run from patch process
if [[ $HUDSON == "true" ]] ; then
if [[ $JENKINS == "true" ]] ; then
$WGET -q -O $PATCH_DIR/jira http://issues.apache.org/jira/browse/$defect
if [[ `$GREP -c 'Patch Available' $PATCH_DIR/jira` == 0 ]] ; then
echo "$defect is not \"Patch Available\". Exiting."
@ -162,6 +231,7 @@ setup () {
cleanupAndExit 0
fi
fi
. $BASEDIR/dev-support/test-patch.properties
### exit if warnings are NOT defined in the properties file
if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]]; then
echo "Please define the following properties in test-patch.properties file"
@ -179,9 +249,8 @@ setup () {
echo "======================================================================"
echo ""
echo ""
# echo "$ANT_HOME/bin/ant -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -D${PROJECT_NAME}PatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
# $ANT_HOME/bin/ant -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -D${PROJECT_NAME}PatchProcess= clean tar > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
$MAVEN_HOME/bin/mvn clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
echo "Trunk compilation is broken?"
cleanupAndExit 1
@ -229,7 +298,7 @@ checkTests () {
testReferences=`$GREP -c -i '/test' $PATCH_DIR/patch`
echo "There appear to be $testReferences test files referenced in the patch."
if [[ $testReferences == 0 ]] ; then
if [[ $HUDSON == "true" ]] ; then
if [[ $JENKINS == "true" ]] ; then
patchIsDoc=`$GREP -c -i 'title="documentation' $PATCH_DIR/jira`
if [[ $patchIsDoc != 0 ]] ; then
echo "The patch appears to be a documentation patch that doesn't require tests."
@ -297,12 +366,15 @@ checkJavadocWarnings () {
echo "======================================================================"
echo ""
echo ""
echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt"
(cd root; mvn install -DskipTests)
(cd doclet; mvn install -DskipTests)
#$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= clean javadoc | tee $PATCH_DIR/patchJavadocWarnings.txt
$MAVEN_HOME/bin/mvn clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | awk '/Javadoc Warnings/,EOF' | $GREP -v 'Javadoc Warnings' | awk 'BEGIN {total = 0} {total += 1} END {print total}'`
echo "$MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1"
if [ -d hadoop-project ]; then
(cd hadoop-project; $MVN install)
fi
if [ -d hadoop-annotations ]; then
(cd hadoop-annotations; $MVN install)
fi
$MVN clean compile javadoc:javadoc -DskipTests -Pdocs -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchJavadocWarnings.txt 2>&1
javadocWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavadocWarnings.txt | $AWK '/Javadoc Warnings/,EOF' | $GREP warning | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
echo ""
echo ""
echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
@ -332,9 +404,8 @@ checkJavacWarnings () {
echo "======================================================================"
echo ""
echo ""
#echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
#$ANT_HOME/bin/ant -Dversion="${VERSION}" -Djavac.args="-Xlint -Xmaxwarns 1000" $ECLIPSE_PROPERTY -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= clean tar > $PATCH_DIR/patchJavacWarnings.txt 2>&1
$MAVEN_HOME/bin/mvn clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
echo "$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1"
$MVN clean compile -DskipTests -D${PROJECT_NAME}PatchProcess -Ptest-patch > $PATCH_DIR/patchJavacWarnings.txt 2>&1
if [[ $? != 0 ]] ; then
JIRA_COMMENT="$JIRA_COMMENT
@ -343,8 +414,8 @@ checkJavacWarnings () {
fi
### Compare trunk and patch javac warning numbers
if [[ -f $PATCH_DIR/patchJavacWarnings.txt ]] ; then
trunkJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/trunkJavacWarnings.txt | awk 'BEGIN {total = 0} {total += 1} END {print total}'`
patchJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavacWarnings.txt | awk 'BEGIN {total = 0} {total += 1} END {print total}'`
trunkJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/trunkJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
patchJavacWarnings=`$GREP '\[WARNING\]' $PATCH_DIR/patchJavacWarnings.txt | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
echo "There appear to be $trunkJavacWarnings javac compiler warnings before the patch and $patchJavacWarnings javac compiler warnings after applying the patch."
if [[ $patchJavacWarnings != "" && $trunkJavacWarnings != "" ]] ; then
if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then
@ -373,9 +444,8 @@ checkReleaseAuditWarnings () {
echo "======================================================================"
echo ""
echo ""
#echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1"
#$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= releaseaudit > $PATCH_DIR/patchReleaseAuditWarnings.txt 2>&1
$MAVEN_HOME/bin/mvn apache-rat:check -D${PROJECT_NAME}PatchProcess 2>&1
echo "$MVN apache-rat:check -D${PROJECT_NAME}PatchProcess 2>&1"
$MVN apache-rat:check -D${PROJECT_NAME}PatchProcess 2>&1
find . -name rat.txt | xargs cat > $PATCH_DIR/patchReleaseAuditWarnings.txt
### Compare trunk and patch release audit warning numbers
@ -418,9 +488,8 @@ checkStyle () {
echo "THIS IS NOT IMPLEMENTED YET"
echo ""
echo ""
#echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle"
#$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= checkstyle
$MAVEN_HOME/bin/mvn compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess
echo "$MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess"
$MVN compile checkstyle:checkstyle -D${PROJECT_NAME}PatchProcess
JIRA_COMMENT_FOOTER="Checkstyle results: $BUILD_URL/artifact/trunk/build/test/checkstyle-errors.html
$JIRA_COMMENT_FOOTER"
@ -451,9 +520,8 @@ checkFindbugsWarnings () {
echo "======================================================================"
echo ""
echo ""
#echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=$FINDBUGS_HOME -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs"
#$ANT_HOME/bin/ant -Dversion="${VERSION}" -Dfindbugs.home=${FINDBUGS_HOME} -Dforrest.home=${FORREST_HOME} -DHadoopPatchProcess= findbugs
$MAVEN_HOME/bin/mvn clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess -X
echo "$MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess"
$MVN clean compile findbugs:findbugs -D${PROJECT_NAME}PatchProcess
if [ $? != 0 ] ; then
JIRA_COMMENT="$JIRA_COMMENT
@ -461,18 +529,29 @@ checkFindbugsWarnings () {
-1 findbugs. The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
return 1
fi
JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/target/newPatchFindbugsWarnings.html
findbugsWarnings=0
for file in $(find $BASEDIR -name findbugsXml.xml)
do
relative_file=${file#$BASEDIR/} # strip leading $BASEDIR prefix
if [ ! $relative_file == "target/findbugsXml.xml" ]; then
module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path
fi
cp $file $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml
$FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \
$PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \
$PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml
newFindbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \
$PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml | $AWK '{print $1}'`
echo "Found $newFindbugsWarnings Findbugs warnings ($file)"
findbugsWarnings=$((findbugsWarnings+newFindbugsWarnings))
$FINDBUGS_HOME/bin/convertXmlToText -html \
$PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml \
$PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html
JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/target/newPatchFindbugsWarnings${module_suffix}.html
$JIRA_COMMENT_FOOTER"
cp $BASEDIR/hadoop-common/target/findbugsXml.xml $PATCH_DIR/patchFindbugsWarnings.xml
$FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \
$PATCH_DIR/patchFindbugsWarnings.xml \
$PATCH_DIR/patchFindbugsWarnings.xml
findbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/patchFindbugsWarnings.xml \
$PATCH_DIR/newPatchFindbugsWarnings.xml | /usr/bin/awk '{print $1}'`
$FINDBUGS_HOME/bin/convertXmlToText -html \
$PATCH_DIR/newPatchFindbugsWarnings.xml \
$PATCH_DIR/newPatchFindbugsWarnings.html
done
### if current warnings greater than OK_FINDBUGS_WARNINGS
if [[ $findbugsWarnings > $OK_FINDBUGS_WARNINGS ]] ; then
@ -501,15 +580,14 @@ runCoreTests () {
echo ""
### Kill any rogue build processes from the last attempt
$PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
$PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
PreTestTarget=""
if [[ $defect == MAPREDUCE-* ]] ; then
PreTestTarget="create-c++-configure"
fi
#echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME $PreTestTarget test-core"
#$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME $PreTestTarget test-core
$MAVEN_HOME/bin/mvn clean test -Pnative -DHadoopPatchProcess
echo "$MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess"
$MVN clean test -Pnative -D${PROJECT_NAME}PatchProcess
if [[ $? != 0 ]] ; then
### Find and format names of failed tests
failed_tests=`grep -l -E "<failure|<error" $WORKSPACE/trunk/target/hadoop-common/surefire-reports/*.xml | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`
@ -544,7 +622,7 @@ runContribTests () {
fi
### Kill any rogue build processes from the last attempt
$PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
$PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
#echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib"
#$ANT_HOME/bin/ant -Dversion="${VERSION}" $ECLIPSE_PROPERTY -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no test-contrib
@ -575,7 +653,7 @@ checkInjectSystemFaults () {
echo ""
### Kill any rogue build processes from the last attempt
$PS auxwww | $GREP HadoopPatchProcess | /usr/bin/nawk '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
$PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
#echo "$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults"
#$ANT_HOME/bin/ant -Dversion="${VERSION}" -DHadoopPatchProcess= -Dtest.junit.output.format=xml -Dtest.output=no -Dcompile.c++=yes -Dforrest.home=$FORREST_HOME inject-system-faults
@ -597,7 +675,7 @@ checkInjectSystemFaults () {
submitJiraComment () {
local result=$1
### Do not output the value of JIRA_COMMENT_FOOTER when run by a developer
if [[ $HUDSON == "false" ]] ; then
if [[ $JENKINS == "false" ]] ; then
JIRA_COMMENT_FOOTER=""
fi
if [[ $result == 0 ]] ; then
@ -616,7 +694,7 @@ $JIRA_COMMENT_FOOTER"
$comment"
if [[ $HUDSON == "true" ]] ; then
if [[ $JENKINS == "true" ]] ; then
echo ""
echo ""
echo "======================================================================"
@ -637,7 +715,7 @@ $comment"
### Cleanup files
cleanupAndExit () {
local result=$1
if [[ $HUDSON == "true" ]] ; then
if [[ $JENKINS == "true" ]] ; then
if [ -e "$PATCH_DIR" ] ; then
mv $PATCH_DIR $BASEDIR
fi
@ -669,7 +747,7 @@ cd $BASEDIR
checkout
RESULT=$?
if [[ $HUDSON == "true" ]] ; then
if [[ $JENKINS == "true" ]] ; then
if [[ $RESULT != 0 ]] ; then
exit 100
fi
@ -678,7 +756,7 @@ setup
checkAuthor
RESULT=$?
if [[ $HUDSON == "true" ]] ; then
if [[ $JENKINS == "true" ]] ; then
cleanUpXml
fi
checkTests
@ -700,7 +778,7 @@ checkFindbugsWarnings
checkReleaseAuditWarnings
(( RESULT = RESULT + $? ))
### Do not call these when run by a developer
if [[ $HUDSON == "true" ]] ; then
if [[ $JENKINS == "true" ]] ; then
runCoreTests
(( RESULT = RESULT + $? ))
runContribTests

View File

@ -67,9 +67,6 @@
<requireJavaVersion>
<version>1.6</version>
</requireJavaVersion>
<requireOS>
<family>unix</family>
</requireOS>
</rules>
</configuration>
<executions>

View File

@ -309,11 +309,29 @@ Trunk (unreleased changes)
HADOOP-7501. Publish Hadoop Common artifacts (post HADOOP-6671) to Apache
SNAPSHOTs repo. (Alejandro Abdelnur via tomwhite)
HADOOP-7525. Make arguments to test-patch optional. (tomwhite)
HADOOP-7472. RPC client should deal with IP address change.
(Kihwal Lee via suresh)
HADOOP-7499. Add method for doing a sanity check on hostnames in NetUtils.
(Jeffrey Naisbit via mahadev)
HADOOP-6158. Move CyclicIteration to HDFS. (eli)
HADOOP-7526. Add TestPath tests for URI conversion and reserved
characters. (eli)
HADOOP-7531. Add servlet util methods for handling paths in requests. (eli)
OPTIMIZATIONS
HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
via todd)
HADOOP-7445. Implement bulk checksum verification using efficient native
code. (todd)
BUG FIXES
HADOOP-7327. FileSystem.listStatus() throws NullPointerException instead of
@ -451,6 +469,34 @@ Trunk (unreleased changes)
HADOOP-7508. Compiled nativelib is in wrong directory and it is not picked
up by surefire setup. (Alejandro Abdelnur via tomwhite)
HADOOP-7520. Fix to add distribution management info to hadoop-main
(Alejandro Abdelnur via gkesavan)
HADOOP-7515. test-patch reports the wrong number of javadoc warnings.
(tomwhite)
HADOOP-7523. Test org.apache.hadoop.fs.TestFilterFileSystem fails due to
java.lang.NoSuchMethodException. (John Lee via tomwhite)
HADOOP-7528. Maven build fails in Windows. (Alejandro Abdelnur via
tomwhite)
HADOOP-7533. Allow test-patch to be run from any subproject directory.
(tomwhite)
HADOOP-7512. Fix example mistake in WritableComparable javadocs.
(Harsh J via eli)
HADOOP-7357. hadoop.io.compress.TestCodec#main() should exit with
non-zero exit code if test failed. (Philip Zeyliger via eli)
HADOOP-6622. Token should not print the password in toString. (eli)
HADOOP-7529. Fix lock cycles in metrics system. (llu)
HADOOP-7545. Common -tests JAR should not include properties and configs.
(todd)
Release 0.22.0 - Unreleased

View File

@ -242,3 +242,12 @@ For the org.apache.hadoop.util.bloom.* classes:
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
For portions of the native implementation of slicing-by-8 CRC calculation
in src/main/native/src/org/apache/hadoop/util:
/**
* Copyright 2008,2009,2010 Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by a
* BSD-style license that can be found in the LICENSE file.
*/

View File

@ -0,0 +1,21 @@
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# The number of acceptable warnings for this module
# Please update the root test-patch.properties if you update this file.
OK_RELEASEAUDIT_WARNINGS=0
OK_FINDBUGS_WARNINGS=0
OK_JAVADOC_WARNINGS=6

View File

@ -279,11 +279,23 @@
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
<id>prepare-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>jar</goal>
</goals>
</execution>
<execution>
<id>prepare-test-jar</id>
<phase>prepare-package</phase>
<goals>
<goal>test-jar</goal>
</goals>
<configuration>
<includes>
<include>**/*.class</include>
</includes>
</configuration>
</execution>
</executions>
</plugin>
@ -545,6 +557,7 @@
<javahClassName>org.apache.hadoop.security.JniBasedUnixGroupsNetgroupMapping</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyCompressor</javahClassName>
<javahClassName>org.apache.hadoop.io.compress.snappy.SnappyDecompressor</javahClassName>
<javahClassName>org.apache.hadoop.util.NativeCrc32</javahClassName>
</javahClassNames>
<javahOutputDirectory>${project.build.directory}/native/javah</javahOutputDirectory>
</configuration>
@ -852,11 +865,17 @@
<!-- Using Unix script to preserve symlinks -->
<echo file="${project.build.directory}/tar-copynativelibs.sh">
which cygpath 2> /dev/null
if [ $? = 1 ]; then
BUILD_DIR="${project.build.directory}"
else
BUILD_DIR=`cygpath --unix '${project.build.directory}'`
fi
TAR='tar cf -'
UNTAR='tar xfBp -'
LIB_DIR="${project.build.directory}/native/target/usr/local/lib"
LIB_DIR="${BUILD_DIR}/native/target/usr/local/lib"
if [ -d $${LIB_DIR} ] ; then
TARGET_DIR="${project.build.directory}/${project.artifactId}-${project.version}/lib/native/${build.platform}"
TARGET_DIR="${BUILD_DIR}/${project.artifactId}-${project.version}/lib/native/${build.platform}"
mkdir -p $${TARGET_DIR}
cd $${LIB_DIR}
$$TAR *hadoop* | (cd $${TARGET_DIR}/; $$UNTAR)
@ -880,11 +899,20 @@
</goals>
<configuration>
<target>
<!-- Using Unix tar to preserve symlinks -->
<exec executable="tar" dir="${project.build.directory}" failonerror="yes">
<arg value="czf"/>
<arg value="${project.build.directory}/${project.artifactId}-${project.version}.tar.gz"/>
<arg value="${project.artifactId}-${project.version}"/>
<!-- Using Unix script to preserve symlinks -->
<echo file="${project.build.directory}/tar-maketar.sh">
which cygpath 2> /dev/null
if [ $? = 1 ]; then
BUILD_DIR="${project.build.directory}"
else
BUILD_DIR=`cygpath --unix '${project.build.directory}'`
fi
cd ${BUILD_DIR}
tar czf ${project.artifactId}-${project.version}.tar.gz ${project.artifactId}-${project.version}
</echo>
<exec executable="sh" dir="${project.build.directory}" failonerror="true">
<arg line="./tar-maketar.sh"/>
</exec>
</target>
</configuration>
@ -945,11 +973,17 @@
<!-- Using Unix script to preserve symlinks -->
<echo file="${project.build.directory}/bintar-copynativelibs.sh">
which cygpath 2> /dev/null
if [ $? = 1 ]; then
BUILD_DIR="${project.build.directory}"
else
BUILD_DIR=`cygpath --unix '${project.build.directory}'`
fi
TAR='tar cf -'
UNTAR='tar xfBp -'
LIB_DIR="${project.build.directory}/native/target/usr/local/lib"
LIB_DIR="${BUILD_DIR}/native/target/usr/local/lib"
if [ -d $${LIB_DIR} ] ; then
TARGET_DIR="${project.build.directory}/${project.artifactId}-${project.version}-bin/lib"
TARGET_DIR="${BUILD_DIR}/${project.artifactId}-${project.version}-bin/lib"
mkdir -p $${TARGET_DIR}
cd $${LIB_DIR}
$$TAR *hadoop* | (cd $${TARGET_DIR}/; $$UNTAR)
@ -973,11 +1007,20 @@
</goals>
<configuration>
<target>
<!-- Using Unix tar to preserve symlinks -->
<exec executable="tar" dir="${project.build.directory}" failonerror="yes">
<arg value="czf"/>
<arg value="${project.build.directory}/${project.artifactId}-${project.version}-bin.tar.gz"/>
<arg value="${project.artifactId}-${project.version}-bin"/>
<!-- Using Unix script to preserve symlinks -->
<echo file="${project.build.directory}/bintar-maketar.sh">
which cygpath 2> /dev/null
if [ $? = 1 ]; then
BUILD_DIR="${project.build.directory}"
else
BUILD_DIR=`cygpath --unix '${project.build.directory}'`
fi
cd ${BUILD_DIR}
tar czf ${project.artifactId}-${project.version}-bin.tar.gz ${project.artifactId}-${project.version}-bin
</echo>
<exec executable="sh" dir="${project.build.directory}" failonerror="true">
<arg line="./bintar-maketar.sh"/>
</exec>
</target>
</configuration>

View File

@ -52,7 +52,7 @@ import org.apache.hadoop.classification.InterfaceStability;
* timestamp = in.readLong();
* }
*
* public int compareTo(MyWritableComparable w) {
* public int compareTo(MyWritableComparable o) {
* int thisValue = this.value;
* int thatValue = ((IntWritable)o).value;
* return (thisValue &lt; thatValue ? -1 : (thisValue==thatValue ? 0 : 1));

View File

@ -405,6 +405,27 @@ public class Client {
saslRpcClient = new SaslRpcClient(authMethod, token, serverPrincipal);
return saslRpcClient.saslConnect(in2, out2);
}
/**
* Update the server address if the address corresponding to the host
* name has changed.
*
* @return true if an addr change was detected.
* @throws IOException when the hostname cannot be resolved.
*/
private synchronized boolean updateAddress() throws IOException {
// Do a fresh lookup with the old host name.
InetSocketAddress currentAddr = new InetSocketAddress(
server.getHostName(), server.getPort());
if (!server.equals(currentAddr)) {
LOG.warn("Address change detected. Old: " + server.toString() +
" New: " + currentAddr.toString());
server = currentAddr;
return true;
}
return false;
}
private synchronized void setupConnection() throws IOException {
short ioFailures = 0;
@ -435,19 +456,28 @@ public class Client {
}
// connection time out is 20s
NetUtils.connect(this.socket, remoteId.getAddress(), 20000);
NetUtils.connect(this.socket, server, 20000);
if (rpcTimeout > 0) {
pingInterval = rpcTimeout; // rpcTimeout overwrites pingInterval
}
this.socket.setSoTimeout(pingInterval);
return;
} catch (SocketTimeoutException toe) {
/* Check for an address change and update the local reference.
* Reset the failure counter if the address was changed
*/
if (updateAddress()) {
timeoutFailures = ioFailures = 0;
}
/*
* The max number of retries is 45, which amounts to 20s*45 = 15
* minutes retries.
*/
handleConnectionFailure(timeoutFailures++, 45, toe);
} catch (IOException ie) {
if (updateAddress()) {
timeoutFailures = ioFailures = 0;
}
handleConnectionFailure(ioFailures++, maxRetries, ie);
}
}
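The updateAddress() change above relies on the fact that constructing a new java.net.InetSocketAddress from a host name performs a fresh DNS lookup. A minimal standalone sketch of that behaviour, with a placeholder host name and port that are not from the patch:

import java.net.InetSocketAddress;

public class AddressChangeSketch {
  public static void main(String[] args) {
    // Resolved once and cached, like Connection.server in the IPC client.
    InetSocketAddress cached = new InetSocketAddress("nn.example.com", 8020);

    // A later re-resolution with the same host name; if the DNS record changed,
    // the two addresses compare unequal and the client can reconnect to the new IP.
    InetSocketAddress fresh =
        new InetSocketAddress(cached.getHostName(), cached.getPort());

    if (!cached.equals(fresh)) {
      System.out.println("Address change detected: " + cached + " -> " + fresh);
    }
  }
}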

View File

@ -18,6 +18,7 @@
package org.apache.hadoop.metrics2.lib;
import java.util.concurrent.atomic.AtomicReference;
import javax.management.ObjectName;
import org.apache.hadoop.classification.InterfaceAudience;
@ -34,7 +35,8 @@ import org.apache.hadoop.metrics2.impl.MetricsSystemImpl;
public enum DefaultMetricsSystem {
INSTANCE; // the singleton
private MetricsSystem impl = new MetricsSystemImpl();
private AtomicReference<MetricsSystem> impl =
new AtomicReference<MetricsSystem>(new MetricsSystemImpl());
volatile boolean miniClusterMode = false;
final UniqueNames mBeanNames = new UniqueNames();
final UniqueNames sourceNames = new UniqueNames();
@ -48,8 +50,8 @@ public enum DefaultMetricsSystem {
return INSTANCE.init(prefix);
}
synchronized MetricsSystem init(String prefix) {
return impl.init(prefix);
MetricsSystem init(String prefix) {
return impl.get().init(prefix);
}
/**
@ -66,8 +68,9 @@ public enum DefaultMetricsSystem {
INSTANCE.shutdownInstance();
}
synchronized void shutdownInstance() {
if (impl.shutdown()) {
void shutdownInstance() {
boolean last = impl.get().shutdown();
if (last) synchronized(this) {
mBeanNames.map.clear();
sourceNames.map.clear();
}
@ -78,13 +81,11 @@ public enum DefaultMetricsSystem {
return INSTANCE.setImpl(ms);
}
synchronized MetricsSystem setImpl(MetricsSystem ms) {
MetricsSystem old = impl;
impl = ms;
return old;
MetricsSystem setImpl(MetricsSystem ms) {
return impl.getAndSet(ms);
}
synchronized MetricsSystem getImpl() { return impl; }
MetricsSystem getImpl() { return impl.get(); }
@InterfaceAudience.Private
public static void setMiniClusterMode(boolean choice) {

View File

@ -17,9 +17,6 @@
*/
package org.apache.hadoop.net;
import java.net.InetAddress;
import java.net.SocketException;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
@ -91,32 +88,6 @@ public class CachedDNSToSwitchMapping implements DNSToSwitchMapping {
return result;
}
/**
* Resolves host names and adds them to the cache.
* Unlike the 'resolve" method, this won't hide UnknownHostExceptions
*
* @param names to resolve
* @return List of resolved names
* @throws UnknownHostException if any hosts cannot be resolved
*/
public List<String> resolveValidHosts(List<String> names)
throws UnknownHostException {
if (names.isEmpty()) {
return new ArrayList<String>();
}
List<String> addresses = new ArrayList<String>(names.size());
for (String name : names) {
addresses.add(InetAddress.getByName(name).getHostAddress());
}
List<String> uncachedHosts = this.getUncachedHosts(names);
// Resolve the uncached hosts
List<String> resolvedHosts = rawMapping.resolveValidHosts(uncachedHosts);
this.cacheResolvedHosts(uncachedHosts, resolvedHosts);
return this.getCachedHosts(addresses);
}
public List<String> resolve(List<String> names) {
// normalize all input names to be in the form of IP addresses
names = NetUtils.normalizeHostNames(names);

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.net;
import java.util.List;
import java.net.UnknownHostException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
@ -45,23 +44,4 @@ public interface DNSToSwitchMapping {
* @return list of resolved network paths
*/
public List<String> resolve(List<String> names);
/**
* Resolves a list of DNS-names/IP-addresses and returns back a list of
* switch information (network paths). One-to-one correspondence must be
* maintained between the elements in the lists.
* Consider an element in the argument list - x.y.com. The switch information
* that is returned must be a network path of the form /foo/rack,
* where / is the root, and 'foo' is the switch where 'rack' is connected.
* Note the hostname/ip-address is not part of the returned path.
* The network topology of the cluster would determine the number of
* components in the network path. Unlike 'resolve', names must be
* resolvable
* @param names
* @return list of resolved network paths
* @throws UnknownHostException if any hosts are not resolvable
*/
public List<String> resolveValidHosts(List<String> names)
throws UnknownHostException;
}
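With resolveValidHosts removed, resolve() is the interface's single method at this revision. A minimal sketch of an implementation (class name and rack path are made up) that honours the one-to-one correspondence the contract requires:

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.net.DNSToSwitchMapping;

public class SingleRackMapping implements DNSToSwitchMapping {
  @Override
  public List<String> resolve(List<String> names) {
    List<String> paths = new ArrayList<String>(names.size());
    for (int i = 0; i < names.size(); i++) {
      paths.add("/default-rack");   // one network path per input name, host not included
    }
    return paths;
  }
}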

View File

@ -27,6 +27,7 @@ import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.UnknownHostException;
import java.net.ConnectException;
import java.nio.channels.SocketChannel;
@ -428,6 +429,35 @@ public class NetUtils {
return hostNames;
}
/**
* Performs a sanity check on the list of hostnames/IPs to verify they at least
* appear to be valid.
* @param names - List of hostnames/IPs
* @throws UnknownHostException
*/
public static void verifyHostnames(String[] names) throws UnknownHostException {
for (String name: names) {
if (name == null) {
throw new UnknownHostException("null hostname found");
}
// The first check supports URL formats (e.g. hdfs://, etc.).
// java.net.URI requires a schema, so we add a dummy one if it doesn't
// have one already.
URI uri = null;
try {
uri = new URI(name);
if (uri.getHost() == null) {
uri = new URI("http://" + name);
}
} catch (URISyntaxException e) {
uri = null;
}
if (uri == null || uri.getHost() == null) {
throw new UnknownHostException(name + " is not a valid Inet address");
}
}
}
private static final Pattern ipPattern = // Pattern for matching hostname to ip:port
Pattern.compile("\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}:?\\d*");
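A brief usage sketch of the new verifyHostnames helper, with placeholder host names: plain host names, IP addresses, and URI-style names are all accepted, and an UnknownHostException is thrown for entries that do not even parse as a host (the check is syntactic; it does not resolve DNS).

import java.net.UnknownHostException;
import org.apache.hadoop.net.NetUtils;

public class VerifyHostnamesSketch {
  public static void main(String[] args) {
    try {
      NetUtils.verifyHostnames(new String[] {
          "nn1.example.com", "10.0.0.5", "hdfs://nn2.example.com:8020" });
      System.out.println("all names look valid");
    } catch (UnknownHostException e) {
      System.err.println("bad host name: " + e.getMessage());
    }
  }
}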

View File

@ -20,7 +20,6 @@ package org.apache.hadoop.net;
import java.util.*;
import java.io.*;
import java.net.UnknownHostException;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -123,17 +122,6 @@ implements Configurable
return m;
}
public List<String> resolveValidHosts(List<String> names)
throws UnknownHostException {
List<String> result = this.resolve(names);
if (result != null) {
return result;
} else {
throw new UnknownHostException(
"Unknown host(s) returned from ScriptBasedMapping");
}
}
private String runResolveCommand(List<String> args) {
int loopCount = 0;

View File

@ -238,8 +238,6 @@ public class Token<T extends TokenIdentifier> implements Writable {
StringBuilder buffer = new StringBuilder();
buffer.append("Ident: ");
addBinaryBuffer(buffer, identifier);
buffer.append(", Pass: ");
addBinaryBuffer(buffer, password);
buffer.append(", Kind: ");
buffer.append(kind.toString());
buffer.append(", Service: ");

View File

@ -265,6 +265,11 @@ public class DataChecksum implements Checksum {
fileName, basePos);
return;
}
if (NativeCrc32.isAvailable()) {
NativeCrc32.verifyChunkedSums(bytesPerChecksum, type, checksums, data,
fileName, basePos);
return;
}
int startDataPos = data.position();
data.mark();

View File

@ -0,0 +1,73 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
import java.nio.ByteBuffer;
import org.apache.hadoop.fs.ChecksumException;
/**
* Wrapper around JNI support code to do checksum computation
* natively.
*/
class NativeCrc32 {
/**
* Return true if the JNI-based native CRC extensions are available.
*/
public static boolean isAvailable() {
return NativeCodeLoader.isNativeCodeLoaded();
}
/**
* Verify the given buffers of data and checksums, and throw an exception
* if any checksum is invalid. The buffers given to this function should
* have their position initially at the start of the data, and their limit
* set at the end of the data. The position, limit, and mark are not
* modified.
*
* @param bytesPerSum the chunk size (eg 512 bytes)
* @param checksumType the DataChecksum type constant
* @param sums the DirectByteBuffer pointing at the beginning of the
* stored checksums
* @param data the DirectByteBuffer pointing at the beginning of the
* data to check
* @param basePos the position in the file where the data buffer starts
* @param fileName the name of the file being verified
* @throws ChecksumException if there is an invalid checksum
*/
public static void verifyChunkedSums(int bytesPerSum, int checksumType,
ByteBuffer sums, ByteBuffer data, String fileName, long basePos)
throws ChecksumException {
nativeVerifyChunkedSums(bytesPerSum, checksumType,
sums, sums.position(),
data, data.position(), data.remaining(),
fileName, basePos);
}
private static native void nativeVerifyChunkedSums(
int bytesPerSum, int checksumType,
ByteBuffer sums, int sumsOffset,
ByteBuffer data, int dataOffset, int dataLength,
String fileName, long basePos);
// Copy the constants over from DataChecksum so that javah will pick them up
// and make them available in the native code header.
public static final int CHECKSUM_CRC32 = DataChecksum.CHECKSUM_CRC32;
public static final int CHECKSUM_CRC32C = DataChecksum.CHECKSUM_CRC32C;
}
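A test-style sketch of the wrapper above (placed in org.apache.hadoop.util because the class is package-private, and only meaningful when the native library is actually loaded), showing the buffer layout it expects: a direct data buffer positioned at the start of the chunk, and a direct sums buffer holding one big-endian CRC-32 per bytesPerSum bytes. The sample data and file name are made up.

package org.apache.hadoop.util;

import java.nio.ByteBuffer;
import java.util.zip.CRC32;

class NativeCrc32Sketch {
  public static void main(String[] args) throws Exception {
    byte[] chunk = "hello, hadoop".getBytes("UTF-8");

    CRC32 crc = new CRC32();            // zlib polynomial, same as CHECKSUM_CRC32
    crc.update(chunk, 0, chunk.length);

    ByteBuffer data = ByteBuffer.allocateDirect(chunk.length);
    data.put(chunk);
    data.flip();                        // position at start of data, limit at end

    ByteBuffer sums = ByteBuffer.allocateDirect(4);
    sums.putInt((int) crc.getValue());  // stored big-endian, one sum for the single chunk
    sums.flip();

    if (NativeCrc32.isAvailable()) {    // the JNI library may not be loaded
      // Throws ChecksumException if any chunk's CRC does not match.
      NativeCrc32.verifyChunkedSums(chunk.length, DataChecksum.CHECKSUM_CRC32,
          sums, data, "sketch-buffer", 0L);
    }
  }
}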

View File

@ -21,10 +21,15 @@ import java.io.*;
import java.util.Calendar;
import javax.servlet.*;
import javax.servlet.http.HttpServletRequest;
import org.apache.commons.httpclient.URIException;
import org.apache.commons.httpclient.util.URIUtil;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import com.google.common.base.Preconditions;
@InterfaceAudience.Private
@InterfaceStability.Unstable
public class ServletUtil {
@ -107,4 +112,55 @@ public class ServletUtil {
public static String percentageGraph(float perc, int width) throws IOException {
return percentageGraph((int)perc, width);
}
}
/**
* Escape and encode a string regarded as within the query component of an URI.
* @param value the value to encode
* @return encoded query, null if the default charset is not supported
*/
public static String encodeQueryValue(final String value) {
try {
return URIUtil.encodeWithinQuery(value, "UTF-8");
} catch (URIException e) {
throw new AssertionError("JVM does not support UTF-8"); // should never happen!
}
}
/**
* Escape and encode a string regarded as the path component of an URI.
* @param path the path component to encode
* @return encoded path, null if UTF-8 is not supported
*/
public static String encodePath(final String path) {
try {
return URIUtil.encodePath(path, "UTF-8");
} catch (URIException e) {
throw new AssertionError("JVM does not support UTF-8"); // should never happen!
}
}
/**
* Parse and decode the path component from the given request.
* @param request Http request to parse
* @param servletName the name of servlet that precedes the path
* @return decoded path component, null if UTF-8 is not supported
*/
public static String getDecodedPath(final HttpServletRequest request, String servletName) {
try {
return URIUtil.decode(getRawPath(request, servletName), "UTF-8");
} catch (URIException e) {
throw new AssertionError("JVM does not support UTF-8"); // should never happen!
}
}
/**
* Parse the path component from the given request and return w/o decoding.
* @param request Http request to parse
* @param servletName the name of servlet that precedes the path
* @return path component, null if the default charset is not supported
*/
public static String getRawPath(final HttpServletRequest request, String servletName) {
Preconditions.checkArgument(request.getRequestURI().startsWith(servletName+"/"));
return request.getRequestURI().substring(servletName.length());
}
}
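A short usage sketch of the new encoding helpers; the path and query values are made up:

import org.apache.hadoop.util.ServletUtil;

public class ServletUtilSketch {
  public static void main(String[] args) {
    // Unsafe characters are percent-escaped as UTF-8.
    String path  = ServletUtil.encodePath("/user/todd/file name#1");
    String query = ServletUtil.encodeQueryValue("a b&c");
    System.out.println(path + "?filter=" + query);
  }
}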

View File

@ -51,7 +51,9 @@ libhadoop_la_SOURCES = src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c \
src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c \
src/org/apache/hadoop/io/nativeio/file_descriptor.c \
src/org/apache/hadoop/io/nativeio/errno_enum.c \
src/org/apache/hadoop/io/nativeio/NativeIO.c
src/org/apache/hadoop/io/nativeio/NativeIO.c \
src/org/apache/hadoop/util/NativeCrc32.c \
src/org/apache/hadoop/util/bulk_crc32.c
libhadoop_la_LDFLAGS = -version-info 1:0:0 $(AM_LDFLAGS)
libhadoop_la_LIBADD = -ldl -ljvm

View File

@ -0,0 +1,154 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// get the autoconf settings
#include "config.h"
#include <arpa/inet.h>
#include <assert.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include "org_apache_hadoop.h"
#include "org_apache_hadoop_util_NativeCrc32.h"
#include "gcc_optimizations.h"
#include "bulk_crc32.h"
static void throw_checksum_exception(JNIEnv *env,
uint32_t got_crc, uint32_t expected_crc,
jstring j_filename, jlong pos) {
char message[1024];
jstring jstr_message;
char *filename;
// Get filename as C string, or "null" if not provided
if (j_filename == NULL) {
filename = strdup("null");
} else {
const char *c_filename = (*env)->GetStringUTFChars(env, j_filename, NULL);
if (c_filename == NULL) {
return; // OOME already thrown
}
filename = strdup(c_filename);
(*env)->ReleaseStringUTFChars(env, j_filename, c_filename);
}
// Format error message
snprintf(message, sizeof(message),
"Checksum error: %s at %ld exp: %d got: %d",
filename, pos, expected_crc, got_crc);
if ((jstr_message = (*env)->NewStringUTF(env, message)) == NULL) {
goto cleanup;
}
// Throw exception
jclass checksum_exception_clazz = (*env)->FindClass(
env, "org/apache/hadoop/fs/ChecksumException");
if (checksum_exception_clazz == NULL) {
goto cleanup;
}
jmethodID checksum_exception_ctor = (*env)->GetMethodID(env,
checksum_exception_clazz, "<init>",
"(Ljava/lang/String;J)V");
if (checksum_exception_ctor == NULL) {
goto cleanup;
}
jthrowable obj = (jthrowable)(*env)->NewObject(env, checksum_exception_clazz,
checksum_exception_ctor, jstr_message, pos);
if (obj == NULL) goto cleanup;
(*env)->Throw(env, obj);
cleanup:
if (filename != NULL) {
free(filename);
}
}
static int convert_java_crc_type(JNIEnv *env, jint crc_type) {
switch (crc_type) {
case org_apache_hadoop_util_NativeCrc32_CHECKSUM_CRC32:
return CRC32_ZLIB_POLYNOMIAL;
case org_apache_hadoop_util_NativeCrc32_CHECKSUM_CRC32C:
return CRC32C_POLYNOMIAL;
default:
THROW(env, "java/lang/IllegalArgumentException",
"Invalid checksum type");
return -1;
}
}
JNIEXPORT void JNICALL Java_org_apache_hadoop_util_NativeCrc32_nativeVerifyChunkedSums
(JNIEnv *env, jclass clazz,
jint bytes_per_checksum, jint j_crc_type,
jobject j_sums, jint sums_offset,
jobject j_data, jint data_offset, jint data_len,
jstring j_filename, jlong base_pos)
{
if (unlikely(!j_sums || !j_data)) {
THROW(env, "java/lang/NullPointerException",
"input ByteBuffers must not be null");
return;
}
// Convert direct byte buffers to C pointers
uint8_t *sums_addr = (*env)->GetDirectBufferAddress(env, j_sums);
uint8_t *data_addr = (*env)->GetDirectBufferAddress(env, j_data);
if (unlikely(!sums_addr || !data_addr)) {
THROW(env, "java/lang/IllegalArgumentException",
"input ByteBuffers must be direct buffers");
return;
}
if (unlikely(sums_offset < 0 || data_offset < 0 || data_len < 0)) {
THROW(env, "java/lang/IllegalArgumentException",
"bad offsets or lengths");
return;
}
uint32_t *sums = (uint32_t *)(sums_addr + sums_offset);
uint8_t *data = data_addr + data_offset;
// Convert to correct internal C constant for CRC type
int crc_type = convert_java_crc_type(env, j_crc_type);
if (crc_type == -1) return; // exception already thrown
// Setup complete. Actually verify checksums.
crc32_error_t error_data;
int ret = bulk_verify_crc(data, data_len, sums, crc_type,
bytes_per_checksum, &error_data);
if (likely(ret == CHECKSUMS_VALID)) {
return;
} else if (unlikely(ret == INVALID_CHECKSUM_DETECTED)) {
long pos = base_pos + (error_data.bad_data - data);
throw_checksum_exception(
env, error_data.got_crc, error_data.expected_crc,
j_filename, pos);
} else {
THROW(env, "java/lang/AssertionError",
"Bad response code from native bulk_verify_crc");
}
}
/**
* vim: sw=2: ts=2: et:
*/

View File

@ -0,0 +1,156 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Portions of this file are from http://www.evanjones.ca/crc32c.html under
* the BSD license:
* Copyright 2008,2009,2010 Massachusetts Institute of Technology.
* All rights reserved. Use of this source code is governed by a
* BSD-style license that can be found in the LICENSE file.
*/
#include <arpa/inet.h>
#include <stdint.h>
#include <unistd.h>
#include "crc32_zlib_polynomial_tables.h"
#include "crc32c_tables.h"
#include "bulk_crc32.h"
#include "gcc_optimizations.h"
typedef uint32_t (*crc_update_func_t)(uint32_t, const uint8_t *, size_t);
static uint32_t crc_init();
static uint32_t crc_val(uint32_t crc);
static uint32_t crc32_zlib_sb8(uint32_t crc, const uint8_t *buf, size_t length);
static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length);
int bulk_verify_crc(const uint8_t *data, size_t data_len,
const uint32_t *sums, int checksum_type,
int bytes_per_checksum,
crc32_error_t *error_info) {
crc_update_func_t crc_update_func;
switch (checksum_type) {
case CRC32_ZLIB_POLYNOMIAL:
crc_update_func = crc32_zlib_sb8;
break;
case CRC32C_POLYNOMIAL:
crc_update_func = crc32c_sb8;
break;
default:
return INVALID_CHECKSUM_TYPE;
}
while (likely(data_len > 0)) {
int len = likely(data_len >= bytes_per_checksum) ? bytes_per_checksum : data_len;
uint32_t crc = crc_init();
crc = crc_update_func(crc, data, len);
crc = ntohl(crc_val(crc));
if (unlikely(crc != *sums)) {
if (error_info != NULL) {
error_info->got_crc = crc;
error_info->expected_crc = *sums;
error_info->bad_data = data;
}
return INVALID_CHECKSUM_DETECTED;
}
data += len;
data_len -= len;
sums++;
}
return CHECKSUMS_VALID;
}
/**
* Initialize a CRC
*/
static uint32_t crc_init() {
return 0xffffffff;
}
/**
* Extract the final result of a CRC
*/
static uint32_t crc_val(uint32_t crc) {
return ~crc;
}
/**
* Computes the CRC32c checksum for the specified buffer using the slicing by 8
* algorithm over 64 bit quantities.
*/
static uint32_t crc32c_sb8(uint32_t crc, const uint8_t *buf, size_t length) {
uint32_t running_length = ((length)/8)*8;
uint32_t end_bytes = length - running_length;
int li;
for (li=0; li < running_length/8; li++) {
crc ^= *(uint32_t *)buf;
buf += 4;
uint32_t term1 = CRC32C_T8_7[crc & 0x000000FF] ^
CRC32C_T8_6[(crc >> 8) & 0x000000FF];
uint32_t term2 = crc >> 16;
crc = term1 ^
CRC32C_T8_5[term2 & 0x000000FF] ^
CRC32C_T8_4[(term2 >> 8) & 0x000000FF];
term1 = CRC32C_T8_3[(*(uint32_t *)buf) & 0x000000FF] ^
CRC32C_T8_2[((*(uint32_t *)buf) >> 8) & 0x000000FF];
term2 = (*(uint32_t *)buf) >> 16;
crc = crc ^
term1 ^
CRC32C_T8_1[term2 & 0x000000FF] ^
CRC32C_T8_0[(term2 >> 8) & 0x000000FF];
buf += 4;
}
for (li=0; li < end_bytes; li++) {
crc = CRC32C_T8_0[(crc ^ *buf++) & 0x000000FF] ^ (crc >> 8);
}
return crc;
}
/**
* Update a CRC using the "zlib" polynomial -- what Hadoop calls CHECKSUM_CRC32
* using slicing-by-8
*/
static uint32_t crc32_zlib_sb8(
uint32_t crc, const uint8_t *buf, size_t length) {
uint32_t running_length = ((length)/8)*8;
uint32_t end_bytes = length - running_length;
int li;
for (li=0; li < running_length/8; li++) {
crc ^= *(uint32_t *)buf;
buf += 4;
uint32_t term1 = CRC32_T8_7[crc & 0x000000FF] ^
CRC32_T8_6[(crc >> 8) & 0x000000FF];
uint32_t term2 = crc >> 16;
crc = term1 ^
CRC32_T8_5[term2 & 0x000000FF] ^
CRC32_T8_4[(term2 >> 8) & 0x000000FF];
term1 = CRC32_T8_3[(*(uint32_t *)buf) & 0x000000FF] ^
CRC32_T8_2[((*(uint32_t *)buf) >> 8) & 0x000000FF];
term2 = (*(uint32_t *)buf) >> 16;
crc = crc ^
term1 ^
CRC32_T8_1[term2 & 0x000000FF] ^
CRC32_T8_0[(term2 >> 8) & 0x000000FF];
buf += 4;
}
for (li=0; li < end_bytes; li++) {
crc = CRC32_T8_0[(crc ^ *buf++) & 0x000000FF] ^ (crc >> 8);
}
return crc;
}

View File

@ -0,0 +1,57 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef BULK_CRC32_H_INCLUDED
#define BULK_CRC32_H_INCLUDED
#include <stdint.h>
// Constants for different CRC algorithms
#define CRC32C_POLYNOMIAL 1
#define CRC32_ZLIB_POLYNOMIAL 2
// Return codes for bulk_verify_crc
#define CHECKSUMS_VALID 0
#define INVALID_CHECKSUM_DETECTED -1
#define INVALID_CHECKSUM_TYPE -2
// Return type for bulk verification when verification fails
typedef struct crc32_error {
uint32_t got_crc;
uint32_t expected_crc;
const uint8_t *bad_data; // pointer to start of data chunk with error
} crc32_error_t;
/**
* Verify a buffer of data which is checksummed in chunks
* of bytes_per_checksum bytes. The checksums are each 32 bits
* and are stored in sequential indexes of the 'sums' array.
*
* checksum_type - one of the CRC32 constants defined above
* error_info - if non-NULL, will be filled in if an error
* is detected
*
* Returns: 0 for success, non-zero for an error, result codes
* for which are defined above
*/
extern int bulk_verify_crc(const uint8_t *data, size_t data_len,
const uint32_t *sums, int checksum_type,
int bytes_per_checksum,
crc32_error_t *error_info);
#endif

View File

@ -0,0 +1,552 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
* CRC-32 lookup tables generated by the polynomial 0xEDB88320.
* See also TestPureJavaCrc32.Table.
*/
const uint32_t CRC32_T8_0[] = {
0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA,
0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3,
0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988,
0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91,
0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE,
0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7,
0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC,
0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5,
0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172,
0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B,
0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940,
0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59,
0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116,
0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F,
0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924,
0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D,
0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A,
0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433,
0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818,
0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01,
0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E,
0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457,
0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C,
0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65,
0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2,
0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB,
0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0,
0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9,
0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086,
0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F,
0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4,
0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD,
0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A,
0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683,
0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8,
0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1,
0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE,
0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7,
0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC,
0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5,
0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252,
0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B,
0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60,
0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79,
0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236,
0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F,
0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04,
0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D,
0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A,
0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713,
0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38,
0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21,
0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E,
0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777,
0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C,
0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45,
0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2,
0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB,
0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0,
0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9,
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6,
0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF,
0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94,
0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D
};
const uint32_t CRC32_T8_1[] = {
0x00000000, 0x191B3141, 0x32366282, 0x2B2D53C3,
0x646CC504, 0x7D77F445, 0x565AA786, 0x4F4196C7,
0xC8D98A08, 0xD1C2BB49, 0xFAEFE88A, 0xE3F4D9CB,
0xACB54F0C, 0xB5AE7E4D, 0x9E832D8E, 0x87981CCF,
0x4AC21251, 0x53D92310, 0x78F470D3, 0x61EF4192,
0x2EAED755, 0x37B5E614, 0x1C98B5D7, 0x05838496,
0x821B9859, 0x9B00A918, 0xB02DFADB, 0xA936CB9A,
0xE6775D5D, 0xFF6C6C1C, 0xD4413FDF, 0xCD5A0E9E,
0x958424A2, 0x8C9F15E3, 0xA7B24620, 0xBEA97761,
0xF1E8E1A6, 0xE8F3D0E7, 0xC3DE8324, 0xDAC5B265,
0x5D5DAEAA, 0x44469FEB, 0x6F6BCC28, 0x7670FD69,
0x39316BAE, 0x202A5AEF, 0x0B07092C, 0x121C386D,
0xDF4636F3, 0xC65D07B2, 0xED705471, 0xF46B6530,
0xBB2AF3F7, 0xA231C2B6, 0x891C9175, 0x9007A034,
0x179FBCFB, 0x0E848DBA, 0x25A9DE79, 0x3CB2EF38,
0x73F379FF, 0x6AE848BE, 0x41C51B7D, 0x58DE2A3C,
0xF0794F05, 0xE9627E44, 0xC24F2D87, 0xDB541CC6,
0x94158A01, 0x8D0EBB40, 0xA623E883, 0xBF38D9C2,
0x38A0C50D, 0x21BBF44C, 0x0A96A78F, 0x138D96CE,
0x5CCC0009, 0x45D73148, 0x6EFA628B, 0x77E153CA,
0xBABB5D54, 0xA3A06C15, 0x888D3FD6, 0x91960E97,
0xDED79850, 0xC7CCA911, 0xECE1FAD2, 0xF5FACB93,
0x7262D75C, 0x6B79E61D, 0x4054B5DE, 0x594F849F,
0x160E1258, 0x0F152319, 0x243870DA, 0x3D23419B,
0x65FD6BA7, 0x7CE65AE6, 0x57CB0925, 0x4ED03864,
0x0191AEA3, 0x188A9FE2, 0x33A7CC21, 0x2ABCFD60,
0xAD24E1AF, 0xB43FD0EE, 0x9F12832D, 0x8609B26C,
0xC94824AB, 0xD05315EA, 0xFB7E4629, 0xE2657768,
0x2F3F79F6, 0x362448B7, 0x1D091B74, 0x04122A35,
0x4B53BCF2, 0x52488DB3, 0x7965DE70, 0x607EEF31,
0xE7E6F3FE, 0xFEFDC2BF, 0xD5D0917C, 0xCCCBA03D,
0x838A36FA, 0x9A9107BB, 0xB1BC5478, 0xA8A76539,
0x3B83984B, 0x2298A90A, 0x09B5FAC9, 0x10AECB88,
0x5FEF5D4F, 0x46F46C0E, 0x6DD93FCD, 0x74C20E8C,
0xF35A1243, 0xEA412302, 0xC16C70C1, 0xD8774180,
0x9736D747, 0x8E2DE606, 0xA500B5C5, 0xBC1B8484,
0x71418A1A, 0x685ABB5B, 0x4377E898, 0x5A6CD9D9,
0x152D4F1E, 0x0C367E5F, 0x271B2D9C, 0x3E001CDD,
0xB9980012, 0xA0833153, 0x8BAE6290, 0x92B553D1,
0xDDF4C516, 0xC4EFF457, 0xEFC2A794, 0xF6D996D5,
0xAE07BCE9, 0xB71C8DA8, 0x9C31DE6B, 0x852AEF2A,
0xCA6B79ED, 0xD37048AC, 0xF85D1B6F, 0xE1462A2E,
0x66DE36E1, 0x7FC507A0, 0x54E85463, 0x4DF36522,
0x02B2F3E5, 0x1BA9C2A4, 0x30849167, 0x299FA026,
0xE4C5AEB8, 0xFDDE9FF9, 0xD6F3CC3A, 0xCFE8FD7B,
0x80A96BBC, 0x99B25AFD, 0xB29F093E, 0xAB84387F,
0x2C1C24B0, 0x350715F1, 0x1E2A4632, 0x07317773,
0x4870E1B4, 0x516BD0F5, 0x7A468336, 0x635DB277,
0xCBFAD74E, 0xD2E1E60F, 0xF9CCB5CC, 0xE0D7848D,
0xAF96124A, 0xB68D230B, 0x9DA070C8, 0x84BB4189,
0x03235D46, 0x1A386C07, 0x31153FC4, 0x280E0E85,
0x674F9842, 0x7E54A903, 0x5579FAC0, 0x4C62CB81,
0x8138C51F, 0x9823F45E, 0xB30EA79D, 0xAA1596DC,
0xE554001B, 0xFC4F315A, 0xD7626299, 0xCE7953D8,
0x49E14F17, 0x50FA7E56, 0x7BD72D95, 0x62CC1CD4,
0x2D8D8A13, 0x3496BB52, 0x1FBBE891, 0x06A0D9D0,
0x5E7EF3EC, 0x4765C2AD, 0x6C48916E, 0x7553A02F,
0x3A1236E8, 0x230907A9, 0x0824546A, 0x113F652B,
0x96A779E4, 0x8FBC48A5, 0xA4911B66, 0xBD8A2A27,
0xF2CBBCE0, 0xEBD08DA1, 0xC0FDDE62, 0xD9E6EF23,
0x14BCE1BD, 0x0DA7D0FC, 0x268A833F, 0x3F91B27E,
0x70D024B9, 0x69CB15F8, 0x42E6463B, 0x5BFD777A,
0xDC656BB5, 0xC57E5AF4, 0xEE530937, 0xF7483876,
0xB809AEB1, 0xA1129FF0, 0x8A3FCC33, 0x9324FD72
};
const uint32_t CRC32_T8_2[] = {
0x00000000, 0x01C26A37, 0x0384D46E, 0x0246BE59,
0x0709A8DC, 0x06CBC2EB, 0x048D7CB2, 0x054F1685,
0x0E1351B8, 0x0FD13B8F, 0x0D9785D6, 0x0C55EFE1,
0x091AF964, 0x08D89353, 0x0A9E2D0A, 0x0B5C473D,
0x1C26A370, 0x1DE4C947, 0x1FA2771E, 0x1E601D29,
0x1B2F0BAC, 0x1AED619B, 0x18ABDFC2, 0x1969B5F5,
0x1235F2C8, 0x13F798FF, 0x11B126A6, 0x10734C91,
0x153C5A14, 0x14FE3023, 0x16B88E7A, 0x177AE44D,
0x384D46E0, 0x398F2CD7, 0x3BC9928E, 0x3A0BF8B9,
0x3F44EE3C, 0x3E86840B, 0x3CC03A52, 0x3D025065,
0x365E1758, 0x379C7D6F, 0x35DAC336, 0x3418A901,
0x3157BF84, 0x3095D5B3, 0x32D36BEA, 0x331101DD,
0x246BE590, 0x25A98FA7, 0x27EF31FE, 0x262D5BC9,
0x23624D4C, 0x22A0277B, 0x20E69922, 0x2124F315,
0x2A78B428, 0x2BBADE1F, 0x29FC6046, 0x283E0A71,
0x2D711CF4, 0x2CB376C3, 0x2EF5C89A, 0x2F37A2AD,
0x709A8DC0, 0x7158E7F7, 0x731E59AE, 0x72DC3399,
0x7793251C, 0x76514F2B, 0x7417F172, 0x75D59B45,
0x7E89DC78, 0x7F4BB64F, 0x7D0D0816, 0x7CCF6221,
0x798074A4, 0x78421E93, 0x7A04A0CA, 0x7BC6CAFD,
0x6CBC2EB0, 0x6D7E4487, 0x6F38FADE, 0x6EFA90E9,
0x6BB5866C, 0x6A77EC5B, 0x68315202, 0x69F33835,
0x62AF7F08, 0x636D153F, 0x612BAB66, 0x60E9C151,
0x65A6D7D4, 0x6464BDE3, 0x662203BA, 0x67E0698D,
0x48D7CB20, 0x4915A117, 0x4B531F4E, 0x4A917579,
0x4FDE63FC, 0x4E1C09CB, 0x4C5AB792, 0x4D98DDA5,
0x46C49A98, 0x4706F0AF, 0x45404EF6, 0x448224C1,
0x41CD3244, 0x400F5873, 0x4249E62A, 0x438B8C1D,
0x54F16850, 0x55330267, 0x5775BC3E, 0x56B7D609,
0x53F8C08C, 0x523AAABB, 0x507C14E2, 0x51BE7ED5,
0x5AE239E8, 0x5B2053DF, 0x5966ED86, 0x58A487B1,
0x5DEB9134, 0x5C29FB03, 0x5E6F455A, 0x5FAD2F6D,
0xE1351B80, 0xE0F771B7, 0xE2B1CFEE, 0xE373A5D9,
0xE63CB35C, 0xE7FED96B, 0xE5B86732, 0xE47A0D05,
0xEF264A38, 0xEEE4200F, 0xECA29E56, 0xED60F461,
0xE82FE2E4, 0xE9ED88D3, 0xEBAB368A, 0xEA695CBD,
0xFD13B8F0, 0xFCD1D2C7, 0xFE976C9E, 0xFF5506A9,
0xFA1A102C, 0xFBD87A1B, 0xF99EC442, 0xF85CAE75,
0xF300E948, 0xF2C2837F, 0xF0843D26, 0xF1465711,
0xF4094194, 0xF5CB2BA3, 0xF78D95FA, 0xF64FFFCD,
0xD9785D60, 0xD8BA3757, 0xDAFC890E, 0xDB3EE339,
0xDE71F5BC, 0xDFB39F8B, 0xDDF521D2, 0xDC374BE5,
0xD76B0CD8, 0xD6A966EF, 0xD4EFD8B6, 0xD52DB281,
0xD062A404, 0xD1A0CE33, 0xD3E6706A, 0xD2241A5D,
0xC55EFE10, 0xC49C9427, 0xC6DA2A7E, 0xC7184049,
0xC25756CC, 0xC3953CFB, 0xC1D382A2, 0xC011E895,
0xCB4DAFA8, 0xCA8FC59F, 0xC8C97BC6, 0xC90B11F1,
0xCC440774, 0xCD866D43, 0xCFC0D31A, 0xCE02B92D,
0x91AF9640, 0x906DFC77, 0x922B422E, 0x93E92819,
0x96A63E9C, 0x976454AB, 0x9522EAF2, 0x94E080C5,
0x9FBCC7F8, 0x9E7EADCF, 0x9C381396, 0x9DFA79A1,
0x98B56F24, 0x99770513, 0x9B31BB4A, 0x9AF3D17D,
0x8D893530, 0x8C4B5F07, 0x8E0DE15E, 0x8FCF8B69,
0x8A809DEC, 0x8B42F7DB, 0x89044982, 0x88C623B5,
0x839A6488, 0x82580EBF, 0x801EB0E6, 0x81DCDAD1,
0x8493CC54, 0x8551A663, 0x8717183A, 0x86D5720D,
0xA9E2D0A0, 0xA820BA97, 0xAA6604CE, 0xABA46EF9,
0xAEEB787C, 0xAF29124B, 0xAD6FAC12, 0xACADC625,
0xA7F18118, 0xA633EB2F, 0xA4755576, 0xA5B73F41,
0xA0F829C4, 0xA13A43F3, 0xA37CFDAA, 0xA2BE979D,
0xB5C473D0, 0xB40619E7, 0xB640A7BE, 0xB782CD89,
0xB2CDDB0C, 0xB30FB13B, 0xB1490F62, 0xB08B6555,
0xBBD72268, 0xBA15485F, 0xB853F606, 0xB9919C31,
0xBCDE8AB4, 0xBD1CE083, 0xBF5A5EDA, 0xBE9834ED
};
const uint32_t CRC32_T8_3[] = {
0x00000000, 0xB8BC6765, 0xAA09C88B, 0x12B5AFEE,
0x8F629757, 0x37DEF032, 0x256B5FDC, 0x9DD738B9,
0xC5B428EF, 0x7D084F8A, 0x6FBDE064, 0xD7018701,
0x4AD6BFB8, 0xF26AD8DD, 0xE0DF7733, 0x58631056,
0x5019579F, 0xE8A530FA, 0xFA109F14, 0x42ACF871,
0xDF7BC0C8, 0x67C7A7AD, 0x75720843, 0xCDCE6F26,
0x95AD7F70, 0x2D111815, 0x3FA4B7FB, 0x8718D09E,
0x1ACFE827, 0xA2738F42, 0xB0C620AC, 0x087A47C9,
0xA032AF3E, 0x188EC85B, 0x0A3B67B5, 0xB28700D0,
0x2F503869, 0x97EC5F0C, 0x8559F0E2, 0x3DE59787,
0x658687D1, 0xDD3AE0B4, 0xCF8F4F5A, 0x7733283F,
0xEAE41086, 0x525877E3, 0x40EDD80D, 0xF851BF68,
0xF02BF8A1, 0x48979FC4, 0x5A22302A, 0xE29E574F,
0x7F496FF6, 0xC7F50893, 0xD540A77D, 0x6DFCC018,
0x359FD04E, 0x8D23B72B, 0x9F9618C5, 0x272A7FA0,
0xBAFD4719, 0x0241207C, 0x10F48F92, 0xA848E8F7,
0x9B14583D, 0x23A83F58, 0x311D90B6, 0x89A1F7D3,
0x1476CF6A, 0xACCAA80F, 0xBE7F07E1, 0x06C36084,
0x5EA070D2, 0xE61C17B7, 0xF4A9B859, 0x4C15DF3C,
0xD1C2E785, 0x697E80E0, 0x7BCB2F0E, 0xC377486B,
0xCB0D0FA2, 0x73B168C7, 0x6104C729, 0xD9B8A04C,
0x446F98F5, 0xFCD3FF90, 0xEE66507E, 0x56DA371B,
0x0EB9274D, 0xB6054028, 0xA4B0EFC6, 0x1C0C88A3,
0x81DBB01A, 0x3967D77F, 0x2BD27891, 0x936E1FF4,
0x3B26F703, 0x839A9066, 0x912F3F88, 0x299358ED,
0xB4446054, 0x0CF80731, 0x1E4DA8DF, 0xA6F1CFBA,
0xFE92DFEC, 0x462EB889, 0x549B1767, 0xEC277002,
0x71F048BB, 0xC94C2FDE, 0xDBF98030, 0x6345E755,
0x6B3FA09C, 0xD383C7F9, 0xC1366817, 0x798A0F72,
0xE45D37CB, 0x5CE150AE, 0x4E54FF40, 0xF6E89825,
0xAE8B8873, 0x1637EF16, 0x048240F8, 0xBC3E279D,
0x21E91F24, 0x99557841, 0x8BE0D7AF, 0x335CB0CA,
0xED59B63B, 0x55E5D15E, 0x47507EB0, 0xFFEC19D5,
0x623B216C, 0xDA874609, 0xC832E9E7, 0x708E8E82,
0x28ED9ED4, 0x9051F9B1, 0x82E4565F, 0x3A58313A,
0xA78F0983, 0x1F336EE6, 0x0D86C108, 0xB53AA66D,
0xBD40E1A4, 0x05FC86C1, 0x1749292F, 0xAFF54E4A,
0x322276F3, 0x8A9E1196, 0x982BBE78, 0x2097D91D,
0x78F4C94B, 0xC048AE2E, 0xD2FD01C0, 0x6A4166A5,
0xF7965E1C, 0x4F2A3979, 0x5D9F9697, 0xE523F1F2,
0x4D6B1905, 0xF5D77E60, 0xE762D18E, 0x5FDEB6EB,
0xC2098E52, 0x7AB5E937, 0x680046D9, 0xD0BC21BC,
0x88DF31EA, 0x3063568F, 0x22D6F961, 0x9A6A9E04,
0x07BDA6BD, 0xBF01C1D8, 0xADB46E36, 0x15080953,
0x1D724E9A, 0xA5CE29FF, 0xB77B8611, 0x0FC7E174,
0x9210D9CD, 0x2AACBEA8, 0x38191146, 0x80A57623,
0xD8C66675, 0x607A0110, 0x72CFAEFE, 0xCA73C99B,
0x57A4F122, 0xEF189647, 0xFDAD39A9, 0x45115ECC,
0x764DEE06, 0xCEF18963, 0xDC44268D, 0x64F841E8,
0xF92F7951, 0x41931E34, 0x5326B1DA, 0xEB9AD6BF,
0xB3F9C6E9, 0x0B45A18C, 0x19F00E62, 0xA14C6907,
0x3C9B51BE, 0x842736DB, 0x96929935, 0x2E2EFE50,
0x2654B999, 0x9EE8DEFC, 0x8C5D7112, 0x34E11677,
0xA9362ECE, 0x118A49AB, 0x033FE645, 0xBB838120,
0xE3E09176, 0x5B5CF613, 0x49E959FD, 0xF1553E98,
0x6C820621, 0xD43E6144, 0xC68BCEAA, 0x7E37A9CF,
0xD67F4138, 0x6EC3265D, 0x7C7689B3, 0xC4CAEED6,
0x591DD66F, 0xE1A1B10A, 0xF3141EE4, 0x4BA87981,
0x13CB69D7, 0xAB770EB2, 0xB9C2A15C, 0x017EC639,
0x9CA9FE80, 0x241599E5, 0x36A0360B, 0x8E1C516E,
0x866616A7, 0x3EDA71C2, 0x2C6FDE2C, 0x94D3B949,
0x090481F0, 0xB1B8E695, 0xA30D497B, 0x1BB12E1E,
0x43D23E48, 0xFB6E592D, 0xE9DBF6C3, 0x516791A6,
0xCCB0A91F, 0x740CCE7A, 0x66B96194, 0xDE0506F1
};
const uint32_t CRC32_T8_4[] = {
0x00000000, 0x3D6029B0, 0x7AC05360, 0x47A07AD0,
0xF580A6C0, 0xC8E08F70, 0x8F40F5A0, 0xB220DC10,
0x30704BC1, 0x0D106271, 0x4AB018A1, 0x77D03111,
0xC5F0ED01, 0xF890C4B1, 0xBF30BE61, 0x825097D1,
0x60E09782, 0x5D80BE32, 0x1A20C4E2, 0x2740ED52,
0x95603142, 0xA80018F2, 0xEFA06222, 0xD2C04B92,
0x5090DC43, 0x6DF0F5F3, 0x2A508F23, 0x1730A693,
0xA5107A83, 0x98705333, 0xDFD029E3, 0xE2B00053,
0xC1C12F04, 0xFCA106B4, 0xBB017C64, 0x866155D4,
0x344189C4, 0x0921A074, 0x4E81DAA4, 0x73E1F314,
0xF1B164C5, 0xCCD14D75, 0x8B7137A5, 0xB6111E15,
0x0431C205, 0x3951EBB5, 0x7EF19165, 0x4391B8D5,
0xA121B886, 0x9C419136, 0xDBE1EBE6, 0xE681C256,
0x54A11E46, 0x69C137F6, 0x2E614D26, 0x13016496,
0x9151F347, 0xAC31DAF7, 0xEB91A027, 0xD6F18997,
0x64D15587, 0x59B17C37, 0x1E1106E7, 0x23712F57,
0x58F35849, 0x659371F9, 0x22330B29, 0x1F532299,
0xAD73FE89, 0x9013D739, 0xD7B3ADE9, 0xEAD38459,
0x68831388, 0x55E33A38, 0x124340E8, 0x2F236958,
0x9D03B548, 0xA0639CF8, 0xE7C3E628, 0xDAA3CF98,
0x3813CFCB, 0x0573E67B, 0x42D39CAB, 0x7FB3B51B,
0xCD93690B, 0xF0F340BB, 0xB7533A6B, 0x8A3313DB,
0x0863840A, 0x3503ADBA, 0x72A3D76A, 0x4FC3FEDA,
0xFDE322CA, 0xC0830B7A, 0x872371AA, 0xBA43581A,
0x9932774D, 0xA4525EFD, 0xE3F2242D, 0xDE920D9D,
0x6CB2D18D, 0x51D2F83D, 0x167282ED, 0x2B12AB5D,
0xA9423C8C, 0x9422153C, 0xD3826FEC, 0xEEE2465C,
0x5CC29A4C, 0x61A2B3FC, 0x2602C92C, 0x1B62E09C,
0xF9D2E0CF, 0xC4B2C97F, 0x8312B3AF, 0xBE729A1F,
0x0C52460F, 0x31326FBF, 0x7692156F, 0x4BF23CDF,
0xC9A2AB0E, 0xF4C282BE, 0xB362F86E, 0x8E02D1DE,
0x3C220DCE, 0x0142247E, 0x46E25EAE, 0x7B82771E,
0xB1E6B092, 0x8C869922, 0xCB26E3F2, 0xF646CA42,
0x44661652, 0x79063FE2, 0x3EA64532, 0x03C66C82,
0x8196FB53, 0xBCF6D2E3, 0xFB56A833, 0xC6368183,
0x74165D93, 0x49767423, 0x0ED60EF3, 0x33B62743,
0xD1062710, 0xEC660EA0, 0xABC67470, 0x96A65DC0,
0x248681D0, 0x19E6A860, 0x5E46D2B0, 0x6326FB00,
0xE1766CD1, 0xDC164561, 0x9BB63FB1, 0xA6D61601,
0x14F6CA11, 0x2996E3A1, 0x6E369971, 0x5356B0C1,
0x70279F96, 0x4D47B626, 0x0AE7CCF6, 0x3787E546,
0x85A73956, 0xB8C710E6, 0xFF676A36, 0xC2074386,
0x4057D457, 0x7D37FDE7, 0x3A978737, 0x07F7AE87,
0xB5D77297, 0x88B75B27, 0xCF1721F7, 0xF2770847,
0x10C70814, 0x2DA721A4, 0x6A075B74, 0x576772C4,
0xE547AED4, 0xD8278764, 0x9F87FDB4, 0xA2E7D404,
0x20B743D5, 0x1DD76A65, 0x5A7710B5, 0x67173905,
0xD537E515, 0xE857CCA5, 0xAFF7B675, 0x92979FC5,
0xE915E8DB, 0xD475C16B, 0x93D5BBBB, 0xAEB5920B,
0x1C954E1B, 0x21F567AB, 0x66551D7B, 0x5B3534CB,
0xD965A31A, 0xE4058AAA, 0xA3A5F07A, 0x9EC5D9CA,
0x2CE505DA, 0x11852C6A, 0x562556BA, 0x6B457F0A,
0x89F57F59, 0xB49556E9, 0xF3352C39, 0xCE550589,
0x7C75D999, 0x4115F029, 0x06B58AF9, 0x3BD5A349,
0xB9853498, 0x84E51D28, 0xC34567F8, 0xFE254E48,
0x4C059258, 0x7165BBE8, 0x36C5C138, 0x0BA5E888,
0x28D4C7DF, 0x15B4EE6F, 0x521494BF, 0x6F74BD0F,
0xDD54611F, 0xE03448AF, 0xA794327F, 0x9AF41BCF,
0x18A48C1E, 0x25C4A5AE, 0x6264DF7E, 0x5F04F6CE,
0xED242ADE, 0xD044036E, 0x97E479BE, 0xAA84500E,
0x4834505D, 0x755479ED, 0x32F4033D, 0x0F942A8D,
0xBDB4F69D, 0x80D4DF2D, 0xC774A5FD, 0xFA148C4D,
0x78441B9C, 0x4524322C, 0x028448FC, 0x3FE4614C,
0x8DC4BD5C, 0xB0A494EC, 0xF704EE3C, 0xCA64C78C
};
const uint32_t CRC32_T8_5[] = {
0x00000000, 0xCB5CD3A5, 0x4DC8A10B, 0x869472AE,
0x9B914216, 0x50CD91B3, 0xD659E31D, 0x1D0530B8,
0xEC53826D, 0x270F51C8, 0xA19B2366, 0x6AC7F0C3,
0x77C2C07B, 0xBC9E13DE, 0x3A0A6170, 0xF156B2D5,
0x03D6029B, 0xC88AD13E, 0x4E1EA390, 0x85427035,
0x9847408D, 0x531B9328, 0xD58FE186, 0x1ED33223,
0xEF8580F6, 0x24D95353, 0xA24D21FD, 0x6911F258,
0x7414C2E0, 0xBF481145, 0x39DC63EB, 0xF280B04E,
0x07AC0536, 0xCCF0D693, 0x4A64A43D, 0x81387798,
0x9C3D4720, 0x57619485, 0xD1F5E62B, 0x1AA9358E,
0xEBFF875B, 0x20A354FE, 0xA6372650, 0x6D6BF5F5,
0x706EC54D, 0xBB3216E8, 0x3DA66446, 0xF6FAB7E3,
0x047A07AD, 0xCF26D408, 0x49B2A6A6, 0x82EE7503,
0x9FEB45BB, 0x54B7961E, 0xD223E4B0, 0x197F3715,
0xE82985C0, 0x23755665, 0xA5E124CB, 0x6EBDF76E,
0x73B8C7D6, 0xB8E41473, 0x3E7066DD, 0xF52CB578,
0x0F580A6C, 0xC404D9C9, 0x4290AB67, 0x89CC78C2,
0x94C9487A, 0x5F959BDF, 0xD901E971, 0x125D3AD4,
0xE30B8801, 0x28575BA4, 0xAEC3290A, 0x659FFAAF,
0x789ACA17, 0xB3C619B2, 0x35526B1C, 0xFE0EB8B9,
0x0C8E08F7, 0xC7D2DB52, 0x4146A9FC, 0x8A1A7A59,
0x971F4AE1, 0x5C439944, 0xDAD7EBEA, 0x118B384F,
0xE0DD8A9A, 0x2B81593F, 0xAD152B91, 0x6649F834,
0x7B4CC88C, 0xB0101B29, 0x36846987, 0xFDD8BA22,
0x08F40F5A, 0xC3A8DCFF, 0x453CAE51, 0x8E607DF4,
0x93654D4C, 0x58399EE9, 0xDEADEC47, 0x15F13FE2,
0xE4A78D37, 0x2FFB5E92, 0xA96F2C3C, 0x6233FF99,
0x7F36CF21, 0xB46A1C84, 0x32FE6E2A, 0xF9A2BD8F,
0x0B220DC1, 0xC07EDE64, 0x46EAACCA, 0x8DB67F6F,
0x90B34FD7, 0x5BEF9C72, 0xDD7BEEDC, 0x16273D79,
0xE7718FAC, 0x2C2D5C09, 0xAAB92EA7, 0x61E5FD02,
0x7CE0CDBA, 0xB7BC1E1F, 0x31286CB1, 0xFA74BF14,
0x1EB014D8, 0xD5ECC77D, 0x5378B5D3, 0x98246676,
0x852156CE, 0x4E7D856B, 0xC8E9F7C5, 0x03B52460,
0xF2E396B5, 0x39BF4510, 0xBF2B37BE, 0x7477E41B,
0x6972D4A3, 0xA22E0706, 0x24BA75A8, 0xEFE6A60D,
0x1D661643, 0xD63AC5E6, 0x50AEB748, 0x9BF264ED,
0x86F75455, 0x4DAB87F0, 0xCB3FF55E, 0x006326FB,
0xF135942E, 0x3A69478B, 0xBCFD3525, 0x77A1E680,
0x6AA4D638, 0xA1F8059D, 0x276C7733, 0xEC30A496,
0x191C11EE, 0xD240C24B, 0x54D4B0E5, 0x9F886340,
0x828D53F8, 0x49D1805D, 0xCF45F2F3, 0x04192156,
0xF54F9383, 0x3E134026, 0xB8873288, 0x73DBE12D,
0x6EDED195, 0xA5820230, 0x2316709E, 0xE84AA33B,
0x1ACA1375, 0xD196C0D0, 0x5702B27E, 0x9C5E61DB,
0x815B5163, 0x4A0782C6, 0xCC93F068, 0x07CF23CD,
0xF6999118, 0x3DC542BD, 0xBB513013, 0x700DE3B6,
0x6D08D30E, 0xA65400AB, 0x20C07205, 0xEB9CA1A0,
0x11E81EB4, 0xDAB4CD11, 0x5C20BFBF, 0x977C6C1A,
0x8A795CA2, 0x41258F07, 0xC7B1FDA9, 0x0CED2E0C,
0xFDBB9CD9, 0x36E74F7C, 0xB0733DD2, 0x7B2FEE77,
0x662ADECF, 0xAD760D6A, 0x2BE27FC4, 0xE0BEAC61,
0x123E1C2F, 0xD962CF8A, 0x5FF6BD24, 0x94AA6E81,
0x89AF5E39, 0x42F38D9C, 0xC467FF32, 0x0F3B2C97,
0xFE6D9E42, 0x35314DE7, 0xB3A53F49, 0x78F9ECEC,
0x65FCDC54, 0xAEA00FF1, 0x28347D5F, 0xE368AEFA,
0x16441B82, 0xDD18C827, 0x5B8CBA89, 0x90D0692C,
0x8DD55994, 0x46898A31, 0xC01DF89F, 0x0B412B3A,
0xFA1799EF, 0x314B4A4A, 0xB7DF38E4, 0x7C83EB41,
0x6186DBF9, 0xAADA085C, 0x2C4E7AF2, 0xE712A957,
0x15921919, 0xDECECABC, 0x585AB812, 0x93066BB7,
0x8E035B0F, 0x455F88AA, 0xC3CBFA04, 0x089729A1,
0xF9C19B74, 0x329D48D1, 0xB4093A7F, 0x7F55E9DA,
0x6250D962, 0xA90C0AC7, 0x2F987869, 0xE4C4ABCC
};
const uint32_t CRC32_T8_6[] = {
0x00000000, 0xA6770BB4, 0x979F1129, 0x31E81A9D,
0xF44F2413, 0x52382FA7, 0x63D0353A, 0xC5A73E8E,
0x33EF4E67, 0x959845D3, 0xA4705F4E, 0x020754FA,
0xC7A06A74, 0x61D761C0, 0x503F7B5D, 0xF64870E9,
0x67DE9CCE, 0xC1A9977A, 0xF0418DE7, 0x56368653,
0x9391B8DD, 0x35E6B369, 0x040EA9F4, 0xA279A240,
0x5431D2A9, 0xF246D91D, 0xC3AEC380, 0x65D9C834,
0xA07EF6BA, 0x0609FD0E, 0x37E1E793, 0x9196EC27,
0xCFBD399C, 0x69CA3228, 0x582228B5, 0xFE552301,
0x3BF21D8F, 0x9D85163B, 0xAC6D0CA6, 0x0A1A0712,
0xFC5277FB, 0x5A257C4F, 0x6BCD66D2, 0xCDBA6D66,
0x081D53E8, 0xAE6A585C, 0x9F8242C1, 0x39F54975,
0xA863A552, 0x0E14AEE6, 0x3FFCB47B, 0x998BBFCF,
0x5C2C8141, 0xFA5B8AF5, 0xCBB39068, 0x6DC49BDC,
0x9B8CEB35, 0x3DFBE081, 0x0C13FA1C, 0xAA64F1A8,
0x6FC3CF26, 0xC9B4C492, 0xF85CDE0F, 0x5E2BD5BB,
0x440B7579, 0xE27C7ECD, 0xD3946450, 0x75E36FE4,
0xB044516A, 0x16335ADE, 0x27DB4043, 0x81AC4BF7,
0x77E43B1E, 0xD19330AA, 0xE07B2A37, 0x460C2183,
0x83AB1F0D, 0x25DC14B9, 0x14340E24, 0xB2430590,
0x23D5E9B7, 0x85A2E203, 0xB44AF89E, 0x123DF32A,
0xD79ACDA4, 0x71EDC610, 0x4005DC8D, 0xE672D739,
0x103AA7D0, 0xB64DAC64, 0x87A5B6F9, 0x21D2BD4D,
0xE47583C3, 0x42028877, 0x73EA92EA, 0xD59D995E,
0x8BB64CE5, 0x2DC14751, 0x1C295DCC, 0xBA5E5678,
0x7FF968F6, 0xD98E6342, 0xE86679DF, 0x4E11726B,
0xB8590282, 0x1E2E0936, 0x2FC613AB, 0x89B1181F,
0x4C162691, 0xEA612D25, 0xDB8937B8, 0x7DFE3C0C,
0xEC68D02B, 0x4A1FDB9F, 0x7BF7C102, 0xDD80CAB6,
0x1827F438, 0xBE50FF8C, 0x8FB8E511, 0x29CFEEA5,
0xDF879E4C, 0x79F095F8, 0x48188F65, 0xEE6F84D1,
0x2BC8BA5F, 0x8DBFB1EB, 0xBC57AB76, 0x1A20A0C2,
0x8816EAF2, 0x2E61E146, 0x1F89FBDB, 0xB9FEF06F,
0x7C59CEE1, 0xDA2EC555, 0xEBC6DFC8, 0x4DB1D47C,
0xBBF9A495, 0x1D8EAF21, 0x2C66B5BC, 0x8A11BE08,
0x4FB68086, 0xE9C18B32, 0xD82991AF, 0x7E5E9A1B,
0xEFC8763C, 0x49BF7D88, 0x78576715, 0xDE206CA1,
0x1B87522F, 0xBDF0599B, 0x8C184306, 0x2A6F48B2,
0xDC27385B, 0x7A5033EF, 0x4BB82972, 0xEDCF22C6,
0x28681C48, 0x8E1F17FC, 0xBFF70D61, 0x198006D5,
0x47ABD36E, 0xE1DCD8DA, 0xD034C247, 0x7643C9F3,
0xB3E4F77D, 0x1593FCC9, 0x247BE654, 0x820CEDE0,
0x74449D09, 0xD23396BD, 0xE3DB8C20, 0x45AC8794,
0x800BB91A, 0x267CB2AE, 0x1794A833, 0xB1E3A387,
0x20754FA0, 0x86024414, 0xB7EA5E89, 0x119D553D,
0xD43A6BB3, 0x724D6007, 0x43A57A9A, 0xE5D2712E,
0x139A01C7, 0xB5ED0A73, 0x840510EE, 0x22721B5A,
0xE7D525D4, 0x41A22E60, 0x704A34FD, 0xD63D3F49,
0xCC1D9F8B, 0x6A6A943F, 0x5B828EA2, 0xFDF58516,
0x3852BB98, 0x9E25B02C, 0xAFCDAAB1, 0x09BAA105,
0xFFF2D1EC, 0x5985DA58, 0x686DC0C5, 0xCE1ACB71,
0x0BBDF5FF, 0xADCAFE4B, 0x9C22E4D6, 0x3A55EF62,
0xABC30345, 0x0DB408F1, 0x3C5C126C, 0x9A2B19D8,
0x5F8C2756, 0xF9FB2CE2, 0xC813367F, 0x6E643DCB,
0x982C4D22, 0x3E5B4696, 0x0FB35C0B, 0xA9C457BF,
0x6C636931, 0xCA146285, 0xFBFC7818, 0x5D8B73AC,
0x03A0A617, 0xA5D7ADA3, 0x943FB73E, 0x3248BC8A,
0xF7EF8204, 0x519889B0, 0x6070932D, 0xC6079899,
0x304FE870, 0x9638E3C4, 0xA7D0F959, 0x01A7F2ED,
0xC400CC63, 0x6277C7D7, 0x539FDD4A, 0xF5E8D6FE,
0x647E3AD9, 0xC209316D, 0xF3E12BF0, 0x55962044,
0x90311ECA, 0x3646157E, 0x07AE0FE3, 0xA1D90457,
0x579174BE, 0xF1E67F0A, 0xC00E6597, 0x66796E23,
0xA3DE50AD, 0x05A95B19, 0x34414184, 0x92364A30
};
const uint32_t CRC32_T8_7[] = {
0x00000000, 0xCCAA009E, 0x4225077D, 0x8E8F07E3,
0x844A0EFA, 0x48E00E64, 0xC66F0987, 0x0AC50919,
0xD3E51BB5, 0x1F4F1B2B, 0x91C01CC8, 0x5D6A1C56,
0x57AF154F, 0x9B0515D1, 0x158A1232, 0xD92012AC,
0x7CBB312B, 0xB01131B5, 0x3E9E3656, 0xF23436C8,
0xF8F13FD1, 0x345B3F4F, 0xBAD438AC, 0x767E3832,
0xAF5E2A9E, 0x63F42A00, 0xED7B2DE3, 0x21D12D7D,
0x2B142464, 0xE7BE24FA, 0x69312319, 0xA59B2387,
0xF9766256, 0x35DC62C8, 0xBB53652B, 0x77F965B5,
0x7D3C6CAC, 0xB1966C32, 0x3F196BD1, 0xF3B36B4F,
0x2A9379E3, 0xE639797D, 0x68B67E9E, 0xA41C7E00,
0xAED97719, 0x62737787, 0xECFC7064, 0x205670FA,
0x85CD537D, 0x496753E3, 0xC7E85400, 0x0B42549E,
0x01875D87, 0xCD2D5D19, 0x43A25AFA, 0x8F085A64,
0x562848C8, 0x9A824856, 0x140D4FB5, 0xD8A74F2B,
0xD2624632, 0x1EC846AC, 0x9047414F, 0x5CED41D1,
0x299DC2ED, 0xE537C273, 0x6BB8C590, 0xA712C50E,
0xADD7CC17, 0x617DCC89, 0xEFF2CB6A, 0x2358CBF4,
0xFA78D958, 0x36D2D9C6, 0xB85DDE25, 0x74F7DEBB,
0x7E32D7A2, 0xB298D73C, 0x3C17D0DF, 0xF0BDD041,
0x5526F3C6, 0x998CF358, 0x1703F4BB, 0xDBA9F425,
0xD16CFD3C, 0x1DC6FDA2, 0x9349FA41, 0x5FE3FADF,
0x86C3E873, 0x4A69E8ED, 0xC4E6EF0E, 0x084CEF90,
0x0289E689, 0xCE23E617, 0x40ACE1F4, 0x8C06E16A,
0xD0EBA0BB, 0x1C41A025, 0x92CEA7C6, 0x5E64A758,
0x54A1AE41, 0x980BAEDF, 0x1684A93C, 0xDA2EA9A2,
0x030EBB0E, 0xCFA4BB90, 0x412BBC73, 0x8D81BCED,
0x8744B5F4, 0x4BEEB56A, 0xC561B289, 0x09CBB217,
0xAC509190, 0x60FA910E, 0xEE7596ED, 0x22DF9673,
0x281A9F6A, 0xE4B09FF4, 0x6A3F9817, 0xA6959889,
0x7FB58A25, 0xB31F8ABB, 0x3D908D58, 0xF13A8DC6,
0xFBFF84DF, 0x37558441, 0xB9DA83A2, 0x7570833C,
0x533B85DA, 0x9F918544, 0x111E82A7, 0xDDB48239,
0xD7718B20, 0x1BDB8BBE, 0x95548C5D, 0x59FE8CC3,
0x80DE9E6F, 0x4C749EF1, 0xC2FB9912, 0x0E51998C,
0x04949095, 0xC83E900B, 0x46B197E8, 0x8A1B9776,
0x2F80B4F1, 0xE32AB46F, 0x6DA5B38C, 0xA10FB312,
0xABCABA0B, 0x6760BA95, 0xE9EFBD76, 0x2545BDE8,
0xFC65AF44, 0x30CFAFDA, 0xBE40A839, 0x72EAA8A7,
0x782FA1BE, 0xB485A120, 0x3A0AA6C3, 0xF6A0A65D,
0xAA4DE78C, 0x66E7E712, 0xE868E0F1, 0x24C2E06F,
0x2E07E976, 0xE2ADE9E8, 0x6C22EE0B, 0xA088EE95,
0x79A8FC39, 0xB502FCA7, 0x3B8DFB44, 0xF727FBDA,
0xFDE2F2C3, 0x3148F25D, 0xBFC7F5BE, 0x736DF520,
0xD6F6D6A7, 0x1A5CD639, 0x94D3D1DA, 0x5879D144,
0x52BCD85D, 0x9E16D8C3, 0x1099DF20, 0xDC33DFBE,
0x0513CD12, 0xC9B9CD8C, 0x4736CA6F, 0x8B9CCAF1,
0x8159C3E8, 0x4DF3C376, 0xC37CC495, 0x0FD6C40B,
0x7AA64737, 0xB60C47A9, 0x3883404A, 0xF42940D4,
0xFEEC49CD, 0x32464953, 0xBCC94EB0, 0x70634E2E,
0xA9435C82, 0x65E95C1C, 0xEB665BFF, 0x27CC5B61,
0x2D095278, 0xE1A352E6, 0x6F2C5505, 0xA386559B,
0x061D761C, 0xCAB77682, 0x44387161, 0x889271FF,
0x825778E6, 0x4EFD7878, 0xC0727F9B, 0x0CD87F05,
0xD5F86DA9, 0x19526D37, 0x97DD6AD4, 0x5B776A4A,
0x51B26353, 0x9D1863CD, 0x1397642E, 0xDF3D64B0,
0x83D02561, 0x4F7A25FF, 0xC1F5221C, 0x0D5F2282,
0x079A2B9B, 0xCB302B05, 0x45BF2CE6, 0x89152C78,
0x50353ED4, 0x9C9F3E4A, 0x121039A9, 0xDEBA3937,
0xD47F302E, 0x18D530B0, 0x965A3753, 0x5AF037CD,
0xFF6B144A, 0x33C114D4, 0xBD4E1337, 0x71E413A9,
0x7B211AB0, 0xB78B1A2E, 0x39041DCD, 0xF5AE1D53,
0x2C8E0FFF, 0xE0240F61, 0x6EAB0882, 0xA201081C,
0xA8C40105, 0x646E019B, 0xEAE10678, 0x264B06E6
};
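
A sketch of how eight tables of this shape are typically consumed by a "slicing-by-8" CRC update (illustrative only; the function name and structure are assumptions, not the patch's actual implementation). The CRC32C_T8_* tables in the next file are laid out the same way.

#include <stddef.h>
#include <stdint.h>

/* Fold eight input bytes per iteration using CRC32_T8_0..CRC32_T8_7,
 * then finish any tail bytes one at a time with CRC32_T8_0. */
static uint32_t crc32_zlib_sb8(uint32_t crc, const uint8_t *buf, size_t len) {
  crc = ~crc;                                    /* standard pre-inversion */
  while (len >= 8) {
    crc = CRC32_T8_7[(crc         ^ buf[0]) & 0xff]
        ^ CRC32_T8_6[((crc >>  8) ^ buf[1]) & 0xff]
        ^ CRC32_T8_5[((crc >> 16) ^ buf[2]) & 0xff]
        ^ CRC32_T8_4[((crc >> 24) ^ buf[3]) & 0xff]
        ^ CRC32_T8_3[buf[4]]
        ^ CRC32_T8_2[buf[5]]
        ^ CRC32_T8_1[buf[6]]
        ^ CRC32_T8_0[buf[7]];
    buf += 8;
    len -= 8;
  }
  while (len--) {                                /* byte-at-a-time tail */
    crc = (crc >> 8) ^ CRC32_T8_0[(crc ^ *buf++) & 0xff];
  }
  return ~crc;                                   /* standard post-inversion */
}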

View File

@ -0,0 +1,550 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/*
 * CRC-32C lookup tables generated by the polynomial 0x82F63B78.
* See also TestPureJavaCrc32.Table.
*/
const uint32_t CRC32C_T8_0[256] = {
0x00000000, 0xF26B8303, 0xE13B70F7, 0x1350F3F4,
0xC79A971F, 0x35F1141C, 0x26A1E7E8, 0xD4CA64EB,
0x8AD958CF, 0x78B2DBCC, 0x6BE22838, 0x9989AB3B,
0x4D43CFD0, 0xBF284CD3, 0xAC78BF27, 0x5E133C24,
0x105EC76F, 0xE235446C, 0xF165B798, 0x030E349B,
0xD7C45070, 0x25AFD373, 0x36FF2087, 0xC494A384,
0x9A879FA0, 0x68EC1CA3, 0x7BBCEF57, 0x89D76C54,
0x5D1D08BF, 0xAF768BBC, 0xBC267848, 0x4E4DFB4B,
0x20BD8EDE, 0xD2D60DDD, 0xC186FE29, 0x33ED7D2A,
0xE72719C1, 0x154C9AC2, 0x061C6936, 0xF477EA35,
0xAA64D611, 0x580F5512, 0x4B5FA6E6, 0xB93425E5,
0x6DFE410E, 0x9F95C20D, 0x8CC531F9, 0x7EAEB2FA,
0x30E349B1, 0xC288CAB2, 0xD1D83946, 0x23B3BA45,
0xF779DEAE, 0x05125DAD, 0x1642AE59, 0xE4292D5A,
0xBA3A117E, 0x4851927D, 0x5B016189, 0xA96AE28A,
0x7DA08661, 0x8FCB0562, 0x9C9BF696, 0x6EF07595,
0x417B1DBC, 0xB3109EBF, 0xA0406D4B, 0x522BEE48,
0x86E18AA3, 0x748A09A0, 0x67DAFA54, 0x95B17957,
0xCBA24573, 0x39C9C670, 0x2A993584, 0xD8F2B687,
0x0C38D26C, 0xFE53516F, 0xED03A29B, 0x1F682198,
0x5125DAD3, 0xA34E59D0, 0xB01EAA24, 0x42752927,
0x96BF4DCC, 0x64D4CECF, 0x77843D3B, 0x85EFBE38,
0xDBFC821C, 0x2997011F, 0x3AC7F2EB, 0xC8AC71E8,
0x1C661503, 0xEE0D9600, 0xFD5D65F4, 0x0F36E6F7,
0x61C69362, 0x93AD1061, 0x80FDE395, 0x72966096,
0xA65C047D, 0x5437877E, 0x4767748A, 0xB50CF789,
0xEB1FCBAD, 0x197448AE, 0x0A24BB5A, 0xF84F3859,
0x2C855CB2, 0xDEEEDFB1, 0xCDBE2C45, 0x3FD5AF46,
0x7198540D, 0x83F3D70E, 0x90A324FA, 0x62C8A7F9,
0xB602C312, 0x44694011, 0x5739B3E5, 0xA55230E6,
0xFB410CC2, 0x092A8FC1, 0x1A7A7C35, 0xE811FF36,
0x3CDB9BDD, 0xCEB018DE, 0xDDE0EB2A, 0x2F8B6829,
0x82F63B78, 0x709DB87B, 0x63CD4B8F, 0x91A6C88C,
0x456CAC67, 0xB7072F64, 0xA457DC90, 0x563C5F93,
0x082F63B7, 0xFA44E0B4, 0xE9141340, 0x1B7F9043,
0xCFB5F4A8, 0x3DDE77AB, 0x2E8E845F, 0xDCE5075C,
0x92A8FC17, 0x60C37F14, 0x73938CE0, 0x81F80FE3,
0x55326B08, 0xA759E80B, 0xB4091BFF, 0x466298FC,
0x1871A4D8, 0xEA1A27DB, 0xF94AD42F, 0x0B21572C,
0xDFEB33C7, 0x2D80B0C4, 0x3ED04330, 0xCCBBC033,
0xA24BB5A6, 0x502036A5, 0x4370C551, 0xB11B4652,
0x65D122B9, 0x97BAA1BA, 0x84EA524E, 0x7681D14D,
0x2892ED69, 0xDAF96E6A, 0xC9A99D9E, 0x3BC21E9D,
0xEF087A76, 0x1D63F975, 0x0E330A81, 0xFC588982,
0xB21572C9, 0x407EF1CA, 0x532E023E, 0xA145813D,
0x758FE5D6, 0x87E466D5, 0x94B49521, 0x66DF1622,
0x38CC2A06, 0xCAA7A905, 0xD9F75AF1, 0x2B9CD9F2,
0xFF56BD19, 0x0D3D3E1A, 0x1E6DCDEE, 0xEC064EED,
0xC38D26C4, 0x31E6A5C7, 0x22B65633, 0xD0DDD530,
0x0417B1DB, 0xF67C32D8, 0xE52CC12C, 0x1747422F,
0x49547E0B, 0xBB3FFD08, 0xA86F0EFC, 0x5A048DFF,
0x8ECEE914, 0x7CA56A17, 0x6FF599E3, 0x9D9E1AE0,
0xD3D3E1AB, 0x21B862A8, 0x32E8915C, 0xC083125F,
0x144976B4, 0xE622F5B7, 0xF5720643, 0x07198540,
0x590AB964, 0xAB613A67, 0xB831C993, 0x4A5A4A90,
0x9E902E7B, 0x6CFBAD78, 0x7FAB5E8C, 0x8DC0DD8F,
0xE330A81A, 0x115B2B19, 0x020BD8ED, 0xF0605BEE,
0x24AA3F05, 0xD6C1BC06, 0xC5914FF2, 0x37FACCF1,
0x69E9F0D5, 0x9B8273D6, 0x88D28022, 0x7AB90321,
0xAE7367CA, 0x5C18E4C9, 0x4F48173D, 0xBD23943E,
0xF36E6F75, 0x0105EC76, 0x12551F82, 0xE03E9C81,
0x34F4F86A, 0xC69F7B69, 0xD5CF889D, 0x27A40B9E,
0x79B737BA, 0x8BDCB4B9, 0x988C474D, 0x6AE7C44E,
0xBE2DA0A5, 0x4C4623A6, 0x5F16D052, 0xAD7D5351
};
const uint32_t CRC32C_T8_1[256] = {
0x00000000, 0x13A29877, 0x274530EE, 0x34E7A899,
0x4E8A61DC, 0x5D28F9AB, 0x69CF5132, 0x7A6DC945,
0x9D14C3B8, 0x8EB65BCF, 0xBA51F356, 0xA9F36B21,
0xD39EA264, 0xC03C3A13, 0xF4DB928A, 0xE7790AFD,
0x3FC5F181, 0x2C6769F6, 0x1880C16F, 0x0B225918,
0x714F905D, 0x62ED082A, 0x560AA0B3, 0x45A838C4,
0xA2D13239, 0xB173AA4E, 0x859402D7, 0x96369AA0,
0xEC5B53E5, 0xFFF9CB92, 0xCB1E630B, 0xD8BCFB7C,
0x7F8BE302, 0x6C297B75, 0x58CED3EC, 0x4B6C4B9B,
0x310182DE, 0x22A31AA9, 0x1644B230, 0x05E62A47,
0xE29F20BA, 0xF13DB8CD, 0xC5DA1054, 0xD6788823,
0xAC154166, 0xBFB7D911, 0x8B507188, 0x98F2E9FF,
0x404E1283, 0x53EC8AF4, 0x670B226D, 0x74A9BA1A,
0x0EC4735F, 0x1D66EB28, 0x298143B1, 0x3A23DBC6,
0xDD5AD13B, 0xCEF8494C, 0xFA1FE1D5, 0xE9BD79A2,
0x93D0B0E7, 0x80722890, 0xB4958009, 0xA737187E,
0xFF17C604, 0xECB55E73, 0xD852F6EA, 0xCBF06E9D,
0xB19DA7D8, 0xA23F3FAF, 0x96D89736, 0x857A0F41,
0x620305BC, 0x71A19DCB, 0x45463552, 0x56E4AD25,
0x2C896460, 0x3F2BFC17, 0x0BCC548E, 0x186ECCF9,
0xC0D23785, 0xD370AFF2, 0xE797076B, 0xF4359F1C,
0x8E585659, 0x9DFACE2E, 0xA91D66B7, 0xBABFFEC0,
0x5DC6F43D, 0x4E646C4A, 0x7A83C4D3, 0x69215CA4,
0x134C95E1, 0x00EE0D96, 0x3409A50F, 0x27AB3D78,
0x809C2506, 0x933EBD71, 0xA7D915E8, 0xB47B8D9F,
0xCE1644DA, 0xDDB4DCAD, 0xE9537434, 0xFAF1EC43,
0x1D88E6BE, 0x0E2A7EC9, 0x3ACDD650, 0x296F4E27,
0x53028762, 0x40A01F15, 0x7447B78C, 0x67E52FFB,
0xBF59D487, 0xACFB4CF0, 0x981CE469, 0x8BBE7C1E,
0xF1D3B55B, 0xE2712D2C, 0xD69685B5, 0xC5341DC2,
0x224D173F, 0x31EF8F48, 0x050827D1, 0x16AABFA6,
0x6CC776E3, 0x7F65EE94, 0x4B82460D, 0x5820DE7A,
0xFBC3FAF9, 0xE861628E, 0xDC86CA17, 0xCF245260,
0xB5499B25, 0xA6EB0352, 0x920CABCB, 0x81AE33BC,
0x66D73941, 0x7575A136, 0x419209AF, 0x523091D8,
0x285D589D, 0x3BFFC0EA, 0x0F186873, 0x1CBAF004,
0xC4060B78, 0xD7A4930F, 0xE3433B96, 0xF0E1A3E1,
0x8A8C6AA4, 0x992EF2D3, 0xADC95A4A, 0xBE6BC23D,
0x5912C8C0, 0x4AB050B7, 0x7E57F82E, 0x6DF56059,
0x1798A91C, 0x043A316B, 0x30DD99F2, 0x237F0185,
0x844819FB, 0x97EA818C, 0xA30D2915, 0xB0AFB162,
0xCAC27827, 0xD960E050, 0xED8748C9, 0xFE25D0BE,
0x195CDA43, 0x0AFE4234, 0x3E19EAAD, 0x2DBB72DA,
0x57D6BB9F, 0x447423E8, 0x70938B71, 0x63311306,
0xBB8DE87A, 0xA82F700D, 0x9CC8D894, 0x8F6A40E3,
0xF50789A6, 0xE6A511D1, 0xD242B948, 0xC1E0213F,
0x26992BC2, 0x353BB3B5, 0x01DC1B2C, 0x127E835B,
0x68134A1E, 0x7BB1D269, 0x4F567AF0, 0x5CF4E287,
0x04D43CFD, 0x1776A48A, 0x23910C13, 0x30339464,
0x4A5E5D21, 0x59FCC556, 0x6D1B6DCF, 0x7EB9F5B8,
0x99C0FF45, 0x8A626732, 0xBE85CFAB, 0xAD2757DC,
0xD74A9E99, 0xC4E806EE, 0xF00FAE77, 0xE3AD3600,
0x3B11CD7C, 0x28B3550B, 0x1C54FD92, 0x0FF665E5,
0x759BACA0, 0x663934D7, 0x52DE9C4E, 0x417C0439,
0xA6050EC4, 0xB5A796B3, 0x81403E2A, 0x92E2A65D,
0xE88F6F18, 0xFB2DF76F, 0xCFCA5FF6, 0xDC68C781,
0x7B5FDFFF, 0x68FD4788, 0x5C1AEF11, 0x4FB87766,
0x35D5BE23, 0x26772654, 0x12908ECD, 0x013216BA,
0xE64B1C47, 0xF5E98430, 0xC10E2CA9, 0xD2ACB4DE,
0xA8C17D9B, 0xBB63E5EC, 0x8F844D75, 0x9C26D502,
0x449A2E7E, 0x5738B609, 0x63DF1E90, 0x707D86E7,
0x0A104FA2, 0x19B2D7D5, 0x2D557F4C, 0x3EF7E73B,
0xD98EEDC6, 0xCA2C75B1, 0xFECBDD28, 0xED69455F,
0x97048C1A, 0x84A6146D, 0xB041BCF4, 0xA3E32483
};
const uint32_t CRC32C_T8_2[256] = {
0x00000000, 0xA541927E, 0x4F6F520D, 0xEA2EC073,
0x9EDEA41A, 0x3B9F3664, 0xD1B1F617, 0x74F06469,
0x38513EC5, 0x9D10ACBB, 0x773E6CC8, 0xD27FFEB6,
0xA68F9ADF, 0x03CE08A1, 0xE9E0C8D2, 0x4CA15AAC,
0x70A27D8A, 0xD5E3EFF4, 0x3FCD2F87, 0x9A8CBDF9,
0xEE7CD990, 0x4B3D4BEE, 0xA1138B9D, 0x045219E3,
0x48F3434F, 0xEDB2D131, 0x079C1142, 0xA2DD833C,
0xD62DE755, 0x736C752B, 0x9942B558, 0x3C032726,
0xE144FB14, 0x4405696A, 0xAE2BA919, 0x0B6A3B67,
0x7F9A5F0E, 0xDADBCD70, 0x30F50D03, 0x95B49F7D,
0xD915C5D1, 0x7C5457AF, 0x967A97DC, 0x333B05A2,
0x47CB61CB, 0xE28AF3B5, 0x08A433C6, 0xADE5A1B8,
0x91E6869E, 0x34A714E0, 0xDE89D493, 0x7BC846ED,
0x0F382284, 0xAA79B0FA, 0x40577089, 0xE516E2F7,
0xA9B7B85B, 0x0CF62A25, 0xE6D8EA56, 0x43997828,
0x37691C41, 0x92288E3F, 0x78064E4C, 0xDD47DC32,
0xC76580D9, 0x622412A7, 0x880AD2D4, 0x2D4B40AA,
0x59BB24C3, 0xFCFAB6BD, 0x16D476CE, 0xB395E4B0,
0xFF34BE1C, 0x5A752C62, 0xB05BEC11, 0x151A7E6F,
0x61EA1A06, 0xC4AB8878, 0x2E85480B, 0x8BC4DA75,
0xB7C7FD53, 0x12866F2D, 0xF8A8AF5E, 0x5DE93D20,
0x29195949, 0x8C58CB37, 0x66760B44, 0xC337993A,
0x8F96C396, 0x2AD751E8, 0xC0F9919B, 0x65B803E5,
0x1148678C, 0xB409F5F2, 0x5E273581, 0xFB66A7FF,
0x26217BCD, 0x8360E9B3, 0x694E29C0, 0xCC0FBBBE,
0xB8FFDFD7, 0x1DBE4DA9, 0xF7908DDA, 0x52D11FA4,
0x1E704508, 0xBB31D776, 0x511F1705, 0xF45E857B,
0x80AEE112, 0x25EF736C, 0xCFC1B31F, 0x6A802161,
0x56830647, 0xF3C29439, 0x19EC544A, 0xBCADC634,
0xC85DA25D, 0x6D1C3023, 0x8732F050, 0x2273622E,
0x6ED23882, 0xCB93AAFC, 0x21BD6A8F, 0x84FCF8F1,
0xF00C9C98, 0x554D0EE6, 0xBF63CE95, 0x1A225CEB,
0x8B277743, 0x2E66E53D, 0xC448254E, 0x6109B730,
0x15F9D359, 0xB0B84127, 0x5A968154, 0xFFD7132A,
0xB3764986, 0x1637DBF8, 0xFC191B8B, 0x595889F5,
0x2DA8ED9C, 0x88E97FE2, 0x62C7BF91, 0xC7862DEF,
0xFB850AC9, 0x5EC498B7, 0xB4EA58C4, 0x11ABCABA,
0x655BAED3, 0xC01A3CAD, 0x2A34FCDE, 0x8F756EA0,
0xC3D4340C, 0x6695A672, 0x8CBB6601, 0x29FAF47F,
0x5D0A9016, 0xF84B0268, 0x1265C21B, 0xB7245065,
0x6A638C57, 0xCF221E29, 0x250CDE5A, 0x804D4C24,
0xF4BD284D, 0x51FCBA33, 0xBBD27A40, 0x1E93E83E,
0x5232B292, 0xF77320EC, 0x1D5DE09F, 0xB81C72E1,
0xCCEC1688, 0x69AD84F6, 0x83834485, 0x26C2D6FB,
0x1AC1F1DD, 0xBF8063A3, 0x55AEA3D0, 0xF0EF31AE,
0x841F55C7, 0x215EC7B9, 0xCB7007CA, 0x6E3195B4,
0x2290CF18, 0x87D15D66, 0x6DFF9D15, 0xC8BE0F6B,
0xBC4E6B02, 0x190FF97C, 0xF321390F, 0x5660AB71,
0x4C42F79A, 0xE90365E4, 0x032DA597, 0xA66C37E9,
0xD29C5380, 0x77DDC1FE, 0x9DF3018D, 0x38B293F3,
0x7413C95F, 0xD1525B21, 0x3B7C9B52, 0x9E3D092C,
0xEACD6D45, 0x4F8CFF3B, 0xA5A23F48, 0x00E3AD36,
0x3CE08A10, 0x99A1186E, 0x738FD81D, 0xD6CE4A63,
0xA23E2E0A, 0x077FBC74, 0xED517C07, 0x4810EE79,
0x04B1B4D5, 0xA1F026AB, 0x4BDEE6D8, 0xEE9F74A6,
0x9A6F10CF, 0x3F2E82B1, 0xD50042C2, 0x7041D0BC,
0xAD060C8E, 0x08479EF0, 0xE2695E83, 0x4728CCFD,
0x33D8A894, 0x96993AEA, 0x7CB7FA99, 0xD9F668E7,
0x9557324B, 0x3016A035, 0xDA386046, 0x7F79F238,
0x0B899651, 0xAEC8042F, 0x44E6C45C, 0xE1A75622,
0xDDA47104, 0x78E5E37A, 0x92CB2309, 0x378AB177,
0x437AD51E, 0xE63B4760, 0x0C158713, 0xA954156D,
0xE5F54FC1, 0x40B4DDBF, 0xAA9A1DCC, 0x0FDB8FB2,
0x7B2BEBDB, 0xDE6A79A5, 0x3444B9D6, 0x91052BA8
};
const uint32_t CRC32C_T8_3[256] = {
0x00000000, 0xDD45AAB8, 0xBF672381, 0x62228939,
0x7B2231F3, 0xA6679B4B, 0xC4451272, 0x1900B8CA,
0xF64463E6, 0x2B01C95E, 0x49234067, 0x9466EADF,
0x8D665215, 0x5023F8AD, 0x32017194, 0xEF44DB2C,
0xE964B13D, 0x34211B85, 0x560392BC, 0x8B463804,
0x924680CE, 0x4F032A76, 0x2D21A34F, 0xF06409F7,
0x1F20D2DB, 0xC2657863, 0xA047F15A, 0x7D025BE2,
0x6402E328, 0xB9474990, 0xDB65C0A9, 0x06206A11,
0xD725148B, 0x0A60BE33, 0x6842370A, 0xB5079DB2,
0xAC072578, 0x71428FC0, 0x136006F9, 0xCE25AC41,
0x2161776D, 0xFC24DDD5, 0x9E0654EC, 0x4343FE54,
0x5A43469E, 0x8706EC26, 0xE524651F, 0x3861CFA7,
0x3E41A5B6, 0xE3040F0E, 0x81268637, 0x5C632C8F,
0x45639445, 0x98263EFD, 0xFA04B7C4, 0x27411D7C,
0xC805C650, 0x15406CE8, 0x7762E5D1, 0xAA274F69,
0xB327F7A3, 0x6E625D1B, 0x0C40D422, 0xD1057E9A,
0xABA65FE7, 0x76E3F55F, 0x14C17C66, 0xC984D6DE,
0xD0846E14, 0x0DC1C4AC, 0x6FE34D95, 0xB2A6E72D,
0x5DE23C01, 0x80A796B9, 0xE2851F80, 0x3FC0B538,
0x26C00DF2, 0xFB85A74A, 0x99A72E73, 0x44E284CB,
0x42C2EEDA, 0x9F874462, 0xFDA5CD5B, 0x20E067E3,
0x39E0DF29, 0xE4A57591, 0x8687FCA8, 0x5BC25610,
0xB4868D3C, 0x69C32784, 0x0BE1AEBD, 0xD6A40405,
0xCFA4BCCF, 0x12E11677, 0x70C39F4E, 0xAD8635F6,
0x7C834B6C, 0xA1C6E1D4, 0xC3E468ED, 0x1EA1C255,
0x07A17A9F, 0xDAE4D027, 0xB8C6591E, 0x6583F3A6,
0x8AC7288A, 0x57828232, 0x35A00B0B, 0xE8E5A1B3,
0xF1E51979, 0x2CA0B3C1, 0x4E823AF8, 0x93C79040,
0x95E7FA51, 0x48A250E9, 0x2A80D9D0, 0xF7C57368,
0xEEC5CBA2, 0x3380611A, 0x51A2E823, 0x8CE7429B,
0x63A399B7, 0xBEE6330F, 0xDCC4BA36, 0x0181108E,
0x1881A844, 0xC5C402FC, 0xA7E68BC5, 0x7AA3217D,
0x52A0C93F, 0x8FE56387, 0xEDC7EABE, 0x30824006,
0x2982F8CC, 0xF4C75274, 0x96E5DB4D, 0x4BA071F5,
0xA4E4AAD9, 0x79A10061, 0x1B838958, 0xC6C623E0,
0xDFC69B2A, 0x02833192, 0x60A1B8AB, 0xBDE41213,
0xBBC47802, 0x6681D2BA, 0x04A35B83, 0xD9E6F13B,
0xC0E649F1, 0x1DA3E349, 0x7F816A70, 0xA2C4C0C8,
0x4D801BE4, 0x90C5B15C, 0xF2E73865, 0x2FA292DD,
0x36A22A17, 0xEBE780AF, 0x89C50996, 0x5480A32E,
0x8585DDB4, 0x58C0770C, 0x3AE2FE35, 0xE7A7548D,
0xFEA7EC47, 0x23E246FF, 0x41C0CFC6, 0x9C85657E,
0x73C1BE52, 0xAE8414EA, 0xCCA69DD3, 0x11E3376B,
0x08E38FA1, 0xD5A62519, 0xB784AC20, 0x6AC10698,
0x6CE16C89, 0xB1A4C631, 0xD3864F08, 0x0EC3E5B0,
0x17C35D7A, 0xCA86F7C2, 0xA8A47EFB, 0x75E1D443,
0x9AA50F6F, 0x47E0A5D7, 0x25C22CEE, 0xF8878656,
0xE1873E9C, 0x3CC29424, 0x5EE01D1D, 0x83A5B7A5,
0xF90696D8, 0x24433C60, 0x4661B559, 0x9B241FE1,
0x8224A72B, 0x5F610D93, 0x3D4384AA, 0xE0062E12,
0x0F42F53E, 0xD2075F86, 0xB025D6BF, 0x6D607C07,
0x7460C4CD, 0xA9256E75, 0xCB07E74C, 0x16424DF4,
0x106227E5, 0xCD278D5D, 0xAF050464, 0x7240AEDC,
0x6B401616, 0xB605BCAE, 0xD4273597, 0x09629F2F,
0xE6264403, 0x3B63EEBB, 0x59416782, 0x8404CD3A,
0x9D0475F0, 0x4041DF48, 0x22635671, 0xFF26FCC9,
0x2E238253, 0xF36628EB, 0x9144A1D2, 0x4C010B6A,
0x5501B3A0, 0x88441918, 0xEA669021, 0x37233A99,
0xD867E1B5, 0x05224B0D, 0x6700C234, 0xBA45688C,
0xA345D046, 0x7E007AFE, 0x1C22F3C7, 0xC167597F,
0xC747336E, 0x1A0299D6, 0x782010EF, 0xA565BA57,
0xBC65029D, 0x6120A825, 0x0302211C, 0xDE478BA4,
0x31035088, 0xEC46FA30, 0x8E647309, 0x5321D9B1,
0x4A21617B, 0x9764CBC3, 0xF54642FA, 0x2803E842
};
const uint32_t CRC32C_T8_4[256] = {
0x00000000, 0x38116FAC, 0x7022DF58, 0x4833B0F4,
0xE045BEB0, 0xD854D11C, 0x906761E8, 0xA8760E44,
0xC5670B91, 0xFD76643D, 0xB545D4C9, 0x8D54BB65,
0x2522B521, 0x1D33DA8D, 0x55006A79, 0x6D1105D5,
0x8F2261D3, 0xB7330E7F, 0xFF00BE8B, 0xC711D127,
0x6F67DF63, 0x5776B0CF, 0x1F45003B, 0x27546F97,
0x4A456A42, 0x725405EE, 0x3A67B51A, 0x0276DAB6,
0xAA00D4F2, 0x9211BB5E, 0xDA220BAA, 0xE2336406,
0x1BA8B557, 0x23B9DAFB, 0x6B8A6A0F, 0x539B05A3,
0xFBED0BE7, 0xC3FC644B, 0x8BCFD4BF, 0xB3DEBB13,
0xDECFBEC6, 0xE6DED16A, 0xAEED619E, 0x96FC0E32,
0x3E8A0076, 0x069B6FDA, 0x4EA8DF2E, 0x76B9B082,
0x948AD484, 0xAC9BBB28, 0xE4A80BDC, 0xDCB96470,
0x74CF6A34, 0x4CDE0598, 0x04EDB56C, 0x3CFCDAC0,
0x51EDDF15, 0x69FCB0B9, 0x21CF004D, 0x19DE6FE1,
0xB1A861A5, 0x89B90E09, 0xC18ABEFD, 0xF99BD151,
0x37516AAE, 0x0F400502, 0x4773B5F6, 0x7F62DA5A,
0xD714D41E, 0xEF05BBB2, 0xA7360B46, 0x9F2764EA,
0xF236613F, 0xCA270E93, 0x8214BE67, 0xBA05D1CB,
0x1273DF8F, 0x2A62B023, 0x625100D7, 0x5A406F7B,
0xB8730B7D, 0x806264D1, 0xC851D425, 0xF040BB89,
0x5836B5CD, 0x6027DA61, 0x28146A95, 0x10050539,
0x7D1400EC, 0x45056F40, 0x0D36DFB4, 0x3527B018,
0x9D51BE5C, 0xA540D1F0, 0xED736104, 0xD5620EA8,
0x2CF9DFF9, 0x14E8B055, 0x5CDB00A1, 0x64CA6F0D,
0xCCBC6149, 0xF4AD0EE5, 0xBC9EBE11, 0x848FD1BD,
0xE99ED468, 0xD18FBBC4, 0x99BC0B30, 0xA1AD649C,
0x09DB6AD8, 0x31CA0574, 0x79F9B580, 0x41E8DA2C,
0xA3DBBE2A, 0x9BCAD186, 0xD3F96172, 0xEBE80EDE,
0x439E009A, 0x7B8F6F36, 0x33BCDFC2, 0x0BADB06E,
0x66BCB5BB, 0x5EADDA17, 0x169E6AE3, 0x2E8F054F,
0x86F90B0B, 0xBEE864A7, 0xF6DBD453, 0xCECABBFF,
0x6EA2D55C, 0x56B3BAF0, 0x1E800A04, 0x269165A8,
0x8EE76BEC, 0xB6F60440, 0xFEC5B4B4, 0xC6D4DB18,
0xABC5DECD, 0x93D4B161, 0xDBE70195, 0xE3F66E39,
0x4B80607D, 0x73910FD1, 0x3BA2BF25, 0x03B3D089,
0xE180B48F, 0xD991DB23, 0x91A26BD7, 0xA9B3047B,
0x01C50A3F, 0x39D46593, 0x71E7D567, 0x49F6BACB,
0x24E7BF1E, 0x1CF6D0B2, 0x54C56046, 0x6CD40FEA,
0xC4A201AE, 0xFCB36E02, 0xB480DEF6, 0x8C91B15A,
0x750A600B, 0x4D1B0FA7, 0x0528BF53, 0x3D39D0FF,
0x954FDEBB, 0xAD5EB117, 0xE56D01E3, 0xDD7C6E4F,
0xB06D6B9A, 0x887C0436, 0xC04FB4C2, 0xF85EDB6E,
0x5028D52A, 0x6839BA86, 0x200A0A72, 0x181B65DE,
0xFA2801D8, 0xC2396E74, 0x8A0ADE80, 0xB21BB12C,
0x1A6DBF68, 0x227CD0C4, 0x6A4F6030, 0x525E0F9C,
0x3F4F0A49, 0x075E65E5, 0x4F6DD511, 0x777CBABD,
0xDF0AB4F9, 0xE71BDB55, 0xAF286BA1, 0x9739040D,
0x59F3BFF2, 0x61E2D05E, 0x29D160AA, 0x11C00F06,
0xB9B60142, 0x81A76EEE, 0xC994DE1A, 0xF185B1B6,
0x9C94B463, 0xA485DBCF, 0xECB66B3B, 0xD4A70497,
0x7CD10AD3, 0x44C0657F, 0x0CF3D58B, 0x34E2BA27,
0xD6D1DE21, 0xEEC0B18D, 0xA6F30179, 0x9EE26ED5,
0x36946091, 0x0E850F3D, 0x46B6BFC9, 0x7EA7D065,
0x13B6D5B0, 0x2BA7BA1C, 0x63940AE8, 0x5B856544,
0xF3F36B00, 0xCBE204AC, 0x83D1B458, 0xBBC0DBF4,
0x425B0AA5, 0x7A4A6509, 0x3279D5FD, 0x0A68BA51,
0xA21EB415, 0x9A0FDBB9, 0xD23C6B4D, 0xEA2D04E1,
0x873C0134, 0xBF2D6E98, 0xF71EDE6C, 0xCF0FB1C0,
0x6779BF84, 0x5F68D028, 0x175B60DC, 0x2F4A0F70,
0xCD796B76, 0xF56804DA, 0xBD5BB42E, 0x854ADB82,
0x2D3CD5C6, 0x152DBA6A, 0x5D1E0A9E, 0x650F6532,
0x081E60E7, 0x300F0F4B, 0x783CBFBF, 0x402DD013,
0xE85BDE57, 0xD04AB1FB, 0x9879010F, 0xA0686EA3
};
const uint32_t CRC32C_T8_5[256] = {
0x00000000, 0xEF306B19, 0xDB8CA0C3, 0x34BCCBDA,
0xB2F53777, 0x5DC55C6E, 0x697997B4, 0x8649FCAD,
0x6006181F, 0x8F367306, 0xBB8AB8DC, 0x54BAD3C5,
0xD2F32F68, 0x3DC34471, 0x097F8FAB, 0xE64FE4B2,
0xC00C303E, 0x2F3C5B27, 0x1B8090FD, 0xF4B0FBE4,
0x72F90749, 0x9DC96C50, 0xA975A78A, 0x4645CC93,
0xA00A2821, 0x4F3A4338, 0x7B8688E2, 0x94B6E3FB,
0x12FF1F56, 0xFDCF744F, 0xC973BF95, 0x2643D48C,
0x85F4168D, 0x6AC47D94, 0x5E78B64E, 0xB148DD57,
0x370121FA, 0xD8314AE3, 0xEC8D8139, 0x03BDEA20,
0xE5F20E92, 0x0AC2658B, 0x3E7EAE51, 0xD14EC548,
0x570739E5, 0xB83752FC, 0x8C8B9926, 0x63BBF23F,
0x45F826B3, 0xAAC84DAA, 0x9E748670, 0x7144ED69,
0xF70D11C4, 0x183D7ADD, 0x2C81B107, 0xC3B1DA1E,
0x25FE3EAC, 0xCACE55B5, 0xFE729E6F, 0x1142F576,
0x970B09DB, 0x783B62C2, 0x4C87A918, 0xA3B7C201,
0x0E045BEB, 0xE13430F2, 0xD588FB28, 0x3AB89031,
0xBCF16C9C, 0x53C10785, 0x677DCC5F, 0x884DA746,
0x6E0243F4, 0x813228ED, 0xB58EE337, 0x5ABE882E,
0xDCF77483, 0x33C71F9A, 0x077BD440, 0xE84BBF59,
0xCE086BD5, 0x213800CC, 0x1584CB16, 0xFAB4A00F,
0x7CFD5CA2, 0x93CD37BB, 0xA771FC61, 0x48419778,
0xAE0E73CA, 0x413E18D3, 0x7582D309, 0x9AB2B810,
0x1CFB44BD, 0xF3CB2FA4, 0xC777E47E, 0x28478F67,
0x8BF04D66, 0x64C0267F, 0x507CEDA5, 0xBF4C86BC,
0x39057A11, 0xD6351108, 0xE289DAD2, 0x0DB9B1CB,
0xEBF65579, 0x04C63E60, 0x307AF5BA, 0xDF4A9EA3,
0x5903620E, 0xB6330917, 0x828FC2CD, 0x6DBFA9D4,
0x4BFC7D58, 0xA4CC1641, 0x9070DD9B, 0x7F40B682,
0xF9094A2F, 0x16392136, 0x2285EAEC, 0xCDB581F5,
0x2BFA6547, 0xC4CA0E5E, 0xF076C584, 0x1F46AE9D,
0x990F5230, 0x763F3929, 0x4283F2F3, 0xADB399EA,
0x1C08B7D6, 0xF338DCCF, 0xC7841715, 0x28B47C0C,
0xAEFD80A1, 0x41CDEBB8, 0x75712062, 0x9A414B7B,
0x7C0EAFC9, 0x933EC4D0, 0xA7820F0A, 0x48B26413,
0xCEFB98BE, 0x21CBF3A7, 0x1577387D, 0xFA475364,
0xDC0487E8, 0x3334ECF1, 0x0788272B, 0xE8B84C32,
0x6EF1B09F, 0x81C1DB86, 0xB57D105C, 0x5A4D7B45,
0xBC029FF7, 0x5332F4EE, 0x678E3F34, 0x88BE542D,
0x0EF7A880, 0xE1C7C399, 0xD57B0843, 0x3A4B635A,
0x99FCA15B, 0x76CCCA42, 0x42700198, 0xAD406A81,
0x2B09962C, 0xC439FD35, 0xF08536EF, 0x1FB55DF6,
0xF9FAB944, 0x16CAD25D, 0x22761987, 0xCD46729E,
0x4B0F8E33, 0xA43FE52A, 0x90832EF0, 0x7FB345E9,
0x59F09165, 0xB6C0FA7C, 0x827C31A6, 0x6D4C5ABF,
0xEB05A612, 0x0435CD0B, 0x308906D1, 0xDFB96DC8,
0x39F6897A, 0xD6C6E263, 0xE27A29B9, 0x0D4A42A0,
0x8B03BE0D, 0x6433D514, 0x508F1ECE, 0xBFBF75D7,
0x120CEC3D, 0xFD3C8724, 0xC9804CFE, 0x26B027E7,
0xA0F9DB4A, 0x4FC9B053, 0x7B757B89, 0x94451090,
0x720AF422, 0x9D3A9F3B, 0xA98654E1, 0x46B63FF8,
0xC0FFC355, 0x2FCFA84C, 0x1B736396, 0xF443088F,
0xD200DC03, 0x3D30B71A, 0x098C7CC0, 0xE6BC17D9,
0x60F5EB74, 0x8FC5806D, 0xBB794BB7, 0x544920AE,
0xB206C41C, 0x5D36AF05, 0x698A64DF, 0x86BA0FC6,
0x00F3F36B, 0xEFC39872, 0xDB7F53A8, 0x344F38B1,
0x97F8FAB0, 0x78C891A9, 0x4C745A73, 0xA344316A,
0x250DCDC7, 0xCA3DA6DE, 0xFE816D04, 0x11B1061D,
0xF7FEE2AF, 0x18CE89B6, 0x2C72426C, 0xC3422975,
0x450BD5D8, 0xAA3BBEC1, 0x9E87751B, 0x71B71E02,
0x57F4CA8E, 0xB8C4A197, 0x8C786A4D, 0x63480154,
0xE501FDF9, 0x0A3196E0, 0x3E8D5D3A, 0xD1BD3623,
0x37F2D291, 0xD8C2B988, 0xEC7E7252, 0x034E194B,
0x8507E5E6, 0x6A378EFF, 0x5E8B4525, 0xB1BB2E3C
};
const uint32_t CRC32C_T8_6[256] = {
0x00000000, 0x68032CC8, 0xD0065990, 0xB8057558,
0xA5E0C5D1, 0xCDE3E919, 0x75E69C41, 0x1DE5B089,
0x4E2DFD53, 0x262ED19B, 0x9E2BA4C3, 0xF628880B,
0xEBCD3882, 0x83CE144A, 0x3BCB6112, 0x53C84DDA,
0x9C5BFAA6, 0xF458D66E, 0x4C5DA336, 0x245E8FFE,
0x39BB3F77, 0x51B813BF, 0xE9BD66E7, 0x81BE4A2F,
0xD27607F5, 0xBA752B3D, 0x02705E65, 0x6A7372AD,
0x7796C224, 0x1F95EEEC, 0xA7909BB4, 0xCF93B77C,
0x3D5B83BD, 0x5558AF75, 0xED5DDA2D, 0x855EF6E5,
0x98BB466C, 0xF0B86AA4, 0x48BD1FFC, 0x20BE3334,
0x73767EEE, 0x1B755226, 0xA370277E, 0xCB730BB6,
0xD696BB3F, 0xBE9597F7, 0x0690E2AF, 0x6E93CE67,
0xA100791B, 0xC90355D3, 0x7106208B, 0x19050C43,
0x04E0BCCA, 0x6CE39002, 0xD4E6E55A, 0xBCE5C992,
0xEF2D8448, 0x872EA880, 0x3F2BDDD8, 0x5728F110,
0x4ACD4199, 0x22CE6D51, 0x9ACB1809, 0xF2C834C1,
0x7AB7077A, 0x12B42BB2, 0xAAB15EEA, 0xC2B27222,
0xDF57C2AB, 0xB754EE63, 0x0F519B3B, 0x6752B7F3,
0x349AFA29, 0x5C99D6E1, 0xE49CA3B9, 0x8C9F8F71,
0x917A3FF8, 0xF9791330, 0x417C6668, 0x297F4AA0,
0xE6ECFDDC, 0x8EEFD114, 0x36EAA44C, 0x5EE98884,
0x430C380D, 0x2B0F14C5, 0x930A619D, 0xFB094D55,
0xA8C1008F, 0xC0C22C47, 0x78C7591F, 0x10C475D7,
0x0D21C55E, 0x6522E996, 0xDD279CCE, 0xB524B006,
0x47EC84C7, 0x2FEFA80F, 0x97EADD57, 0xFFE9F19F,
0xE20C4116, 0x8A0F6DDE, 0x320A1886, 0x5A09344E,
0x09C17994, 0x61C2555C, 0xD9C72004, 0xB1C40CCC,
0xAC21BC45, 0xC422908D, 0x7C27E5D5, 0x1424C91D,
0xDBB77E61, 0xB3B452A9, 0x0BB127F1, 0x63B20B39,
0x7E57BBB0, 0x16549778, 0xAE51E220, 0xC652CEE8,
0x959A8332, 0xFD99AFFA, 0x459CDAA2, 0x2D9FF66A,
0x307A46E3, 0x58796A2B, 0xE07C1F73, 0x887F33BB,
0xF56E0EF4, 0x9D6D223C, 0x25685764, 0x4D6B7BAC,
0x508ECB25, 0x388DE7ED, 0x808892B5, 0xE88BBE7D,
0xBB43F3A7, 0xD340DF6F, 0x6B45AA37, 0x034686FF,
0x1EA33676, 0x76A01ABE, 0xCEA56FE6, 0xA6A6432E,
0x6935F452, 0x0136D89A, 0xB933ADC2, 0xD130810A,
0xCCD53183, 0xA4D61D4B, 0x1CD36813, 0x74D044DB,
0x27180901, 0x4F1B25C9, 0xF71E5091, 0x9F1D7C59,
0x82F8CCD0, 0xEAFBE018, 0x52FE9540, 0x3AFDB988,
0xC8358D49, 0xA036A181, 0x1833D4D9, 0x7030F811,
0x6DD54898, 0x05D66450, 0xBDD31108, 0xD5D03DC0,
0x8618701A, 0xEE1B5CD2, 0x561E298A, 0x3E1D0542,
0x23F8B5CB, 0x4BFB9903, 0xF3FEEC5B, 0x9BFDC093,
0x546E77EF, 0x3C6D5B27, 0x84682E7F, 0xEC6B02B7,
0xF18EB23E, 0x998D9EF6, 0x2188EBAE, 0x498BC766,
0x1A438ABC, 0x7240A674, 0xCA45D32C, 0xA246FFE4,
0xBFA34F6D, 0xD7A063A5, 0x6FA516FD, 0x07A63A35,
0x8FD9098E, 0xE7DA2546, 0x5FDF501E, 0x37DC7CD6,
0x2A39CC5F, 0x423AE097, 0xFA3F95CF, 0x923CB907,
0xC1F4F4DD, 0xA9F7D815, 0x11F2AD4D, 0x79F18185,
0x6414310C, 0x0C171DC4, 0xB412689C, 0xDC114454,
0x1382F328, 0x7B81DFE0, 0xC384AAB8, 0xAB878670,
0xB66236F9, 0xDE611A31, 0x66646F69, 0x0E6743A1,
0x5DAF0E7B, 0x35AC22B3, 0x8DA957EB, 0xE5AA7B23,
0xF84FCBAA, 0x904CE762, 0x2849923A, 0x404ABEF2,
0xB2828A33, 0xDA81A6FB, 0x6284D3A3, 0x0A87FF6B,
0x17624FE2, 0x7F61632A, 0xC7641672, 0xAF673ABA,
0xFCAF7760, 0x94AC5BA8, 0x2CA92EF0, 0x44AA0238,
0x594FB2B1, 0x314C9E79, 0x8949EB21, 0xE14AC7E9,
0x2ED97095, 0x46DA5C5D, 0xFEDF2905, 0x96DC05CD,
0x8B39B544, 0xE33A998C, 0x5B3FECD4, 0x333CC01C,
0x60F48DC6, 0x08F7A10E, 0xB0F2D456, 0xD8F1F89E,
0xC5144817, 0xAD1764DF, 0x15121187, 0x7D113D4F
};
const uint32_t CRC32C_T8_7[256] = {
0x00000000, 0x493C7D27, 0x9278FA4E, 0xDB448769,
0x211D826D, 0x6821FF4A, 0xB3657823, 0xFA590504,
0x423B04DA, 0x0B0779FD, 0xD043FE94, 0x997F83B3,
0x632686B7, 0x2A1AFB90, 0xF15E7CF9, 0xB86201DE,
0x847609B4, 0xCD4A7493, 0x160EF3FA, 0x5F328EDD,
0xA56B8BD9, 0xEC57F6FE, 0x37137197, 0x7E2F0CB0,
0xC64D0D6E, 0x8F717049, 0x5435F720, 0x1D098A07,
0xE7508F03, 0xAE6CF224, 0x7528754D, 0x3C14086A,
0x0D006599, 0x443C18BE, 0x9F789FD7, 0xD644E2F0,
0x2C1DE7F4, 0x65219AD3, 0xBE651DBA, 0xF759609D,
0x4F3B6143, 0x06071C64, 0xDD439B0D, 0x947FE62A,
0x6E26E32E, 0x271A9E09, 0xFC5E1960, 0xB5626447,
0x89766C2D, 0xC04A110A, 0x1B0E9663, 0x5232EB44,
0xA86BEE40, 0xE1579367, 0x3A13140E, 0x732F6929,
0xCB4D68F7, 0x827115D0, 0x593592B9, 0x1009EF9E,
0xEA50EA9A, 0xA36C97BD, 0x782810D4, 0x31146DF3,
0x1A00CB32, 0x533CB615, 0x8878317C, 0xC1444C5B,
0x3B1D495F, 0x72213478, 0xA965B311, 0xE059CE36,
0x583BCFE8, 0x1107B2CF, 0xCA4335A6, 0x837F4881,
0x79264D85, 0x301A30A2, 0xEB5EB7CB, 0xA262CAEC,
0x9E76C286, 0xD74ABFA1, 0x0C0E38C8, 0x453245EF,
0xBF6B40EB, 0xF6573DCC, 0x2D13BAA5, 0x642FC782,
0xDC4DC65C, 0x9571BB7B, 0x4E353C12, 0x07094135,
0xFD504431, 0xB46C3916, 0x6F28BE7F, 0x2614C358,
0x1700AEAB, 0x5E3CD38C, 0x857854E5, 0xCC4429C2,
0x361D2CC6, 0x7F2151E1, 0xA465D688, 0xED59ABAF,
0x553BAA71, 0x1C07D756, 0xC743503F, 0x8E7F2D18,
0x7426281C, 0x3D1A553B, 0xE65ED252, 0xAF62AF75,
0x9376A71F, 0xDA4ADA38, 0x010E5D51, 0x48322076,
0xB26B2572, 0xFB575855, 0x2013DF3C, 0x692FA21B,
0xD14DA3C5, 0x9871DEE2, 0x4335598B, 0x0A0924AC,
0xF05021A8, 0xB96C5C8F, 0x6228DBE6, 0x2B14A6C1,
0x34019664, 0x7D3DEB43, 0xA6796C2A, 0xEF45110D,
0x151C1409, 0x5C20692E, 0x8764EE47, 0xCE589360,
0x763A92BE, 0x3F06EF99, 0xE44268F0, 0xAD7E15D7,
0x572710D3, 0x1E1B6DF4, 0xC55FEA9D, 0x8C6397BA,
0xB0779FD0, 0xF94BE2F7, 0x220F659E, 0x6B3318B9,
0x916A1DBD, 0xD856609A, 0x0312E7F3, 0x4A2E9AD4,
0xF24C9B0A, 0xBB70E62D, 0x60346144, 0x29081C63,
0xD3511967, 0x9A6D6440, 0x4129E329, 0x08159E0E,
0x3901F3FD, 0x703D8EDA, 0xAB7909B3, 0xE2457494,
0x181C7190, 0x51200CB7, 0x8A648BDE, 0xC358F6F9,
0x7B3AF727, 0x32068A00, 0xE9420D69, 0xA07E704E,
0x5A27754A, 0x131B086D, 0xC85F8F04, 0x8163F223,
0xBD77FA49, 0xF44B876E, 0x2F0F0007, 0x66337D20,
0x9C6A7824, 0xD5560503, 0x0E12826A, 0x472EFF4D,
0xFF4CFE93, 0xB67083B4, 0x6D3404DD, 0x240879FA,
0xDE517CFE, 0x976D01D9, 0x4C2986B0, 0x0515FB97,
0x2E015D56, 0x673D2071, 0xBC79A718, 0xF545DA3F,
0x0F1CDF3B, 0x4620A21C, 0x9D642575, 0xD4585852,
0x6C3A598C, 0x250624AB, 0xFE42A3C2, 0xB77EDEE5,
0x4D27DBE1, 0x041BA6C6, 0xDF5F21AF, 0x96635C88,
0xAA7754E2, 0xE34B29C5, 0x380FAEAC, 0x7133D38B,
0x8B6AD68F, 0xC256ABA8, 0x19122CC1, 0x502E51E6,
0xE84C5038, 0xA1702D1F, 0x7A34AA76, 0x3308D751,
0xC951D255, 0x806DAF72, 0x5B29281B, 0x1215553C,
0x230138CF, 0x6A3D45E8, 0xB179C281, 0xF845BFA6,
0x021CBAA2, 0x4B20C785, 0x906440EC, 0xD9583DCB,
0x613A3C15, 0x28064132, 0xF342C65B, 0xBA7EBB7C,
0x4027BE78, 0x091BC35F, 0xD25F4436, 0x9B633911,
0xA777317B, 0xEE4B4C5C, 0x350FCB35, 0x7C33B612,
0x866AB316, 0xCF56CE31, 0x14124958, 0x5D2E347F,
0xE54C35A1, 0xAC704886, 0x7734CFEF, 0x3E08B2C8,
0xC451B7CC, 0x8D6DCAEB, 0x56294D82, 0x1F1530A5
};

View File

@ -0,0 +1,30 @@
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef __GCC_OPTIMIZATIONS_H_INCLUDED
#define __GCC_OPTIMIZATIONS_H_INCLUDED
// Hints to gcc optimizer -- compiled out on non-GCC
#ifdef __GNUC__
#define likely(x) __builtin_expect((x),1)
#define unlikely(x) __builtin_expect((x),0)
#else
#define likely(x) (x)
#define unlikely(x) (x)
#endif
#endif
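
A brief illustration of how these hints are meant to be used (not from the patch; the include name is an assumption matching the header guard above):

#include <stdint.h>
#include "gcc_optimizations.h"   /* assumed file name for the header above */

/* Annotate the rare mismatch branch so GCC keeps the common path fall-through. */
static int check_chunk(uint32_t got, uint32_t expected) {
  if (unlikely(got != expected)) {
    return -1;    /* cold error path */
  }
  return 0;       /* hot success path */
}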

View File

@ -126,6 +126,8 @@ public class TestFilterFileSystem extends TestCase {
public void moveFromLocalFile(Path[] srcs, Path dst) { }
public void moveFromLocalFile(Path src, Path dst) { }
public void copyToLocalFile(Path src, Path dst) { }
public void copyToLocalFile(boolean delSrc, Path src, Path dst,
boolean useRawLocalFileSystem) { }
public void moveToLocalFile(Path src, Path dst) { }
public long getBlockSize(Path f) { return 0; }
public FSDataOutputStream primitiveCreate(final Path f,

View File

@ -185,6 +185,48 @@ public class TestPath extends TestCase {
assertEquals("foo://bar/fud#boo", new Path(new Path(new URI(
"foo://bar/baz#bud")), new Path(new URI("/fud#boo"))).toString());
}
/** Test URIs created from Path objects */
public void testPathToUriConversion() throws URISyntaxException, IOException {
// Path differs from URI in that it ignores the query part.
assertEquals(new URI(null, null, "/foo?bar", null, null), new Path("/foo?bar").toUri());
assertEquals(new URI(null, null, "/foo\"bar", null, null), new Path("/foo\"bar").toUri());
assertEquals(new URI(null, null, "/foo bar", null, null), new Path("/foo bar").toUri());
// therefore "foo?bar" is a valid Path, so a URI created from a Path has path "foo?bar"
// where in a straight URI the path part is just "foo"
assertEquals("/foo?bar", new Path("http://localhost/foo?bar").toUri().getPath());
assertEquals("/foo", new URI("http://localhost/foo?bar").getPath());
// The path part handling in Path is equivalent to URI
assertEquals(new URI("/foo;bar").getPath(), new Path("/foo;bar").toUri().getPath());
assertEquals(new URI("/foo;bar"), new Path("/foo;bar").toUri());
assertEquals(new URI("/foo+bar"), new Path("/foo+bar").toUri());
assertEquals(new URI("/foo-bar"), new Path("/foo-bar").toUri());
assertEquals(new URI("/foo=bar"), new Path("/foo=bar").toUri());
assertEquals(new URI("/foo,bar"), new Path("/foo,bar").toUri());
}
/** Test reserved characters in URIs (and therefore Paths) */
public void testReservedCharacters() throws URISyntaxException, IOException {
// URI encodes the path
assertEquals("/foo%20bar", new URI(null, null, "/foo bar", null, null).getRawPath());
// URI#getPath decodes the path
assertEquals("/foo bar", new URI(null, null, "/foo bar", null, null).getPath());
// URI#toString returns an encoded path
assertEquals("/foo%20bar", new URI(null, null, "/foo bar", null, null).toString());
assertEquals("/foo%20bar", new Path("/foo bar").toUri().toString());
// Reserved chars are not encoded
assertEquals("/foo;bar", new URI("/foo;bar").getPath());
assertEquals("/foo;bar", new URI("/foo;bar").getRawPath());
assertEquals("/foo+bar", new URI("/foo+bar").getPath());
assertEquals("/foo+bar", new URI("/foo+bar").getRawPath());
// URI#getPath decodes the path part (and URL#getPath does not decode)
assertEquals("/foo bar", new Path("http://localhost/foo bar").toUri().getPath());
assertEquals("/foo%20bar", new Path("http://localhost/foo bar").toUri().toURL().getPath());
assertEquals("/foo?bar", new URI("http", "localhost", "/foo?bar", null, null).getPath());
assertEquals("/foo%3Fbar", new URI("http", "localhost", "/foo?bar", null, null).toURL().getPath());
}
public void testMakeQualified() throws URISyntaxException {
URI defaultUri = new URI("hdfs://host1/dir1");

View File

@ -501,7 +501,7 @@ public class TestCodec {
LOG.info("SUCCESS! Completed SequenceFileCodecTest with codec \"" + codecClass + "\"");
}
public static void main(String[] args) {
public static void main(String[] args) throws IOException {
int count = 10000;
String codecClass = "org.apache.hadoop.io.compress.DefaultCodec";
@ -511,25 +511,20 @@ public class TestCodec {
System.exit(-1);
}
try {
for (int i=0; i < args.length; ++i) { // parse command line
if (args[i] == null) {
continue;
} else if (args[i].equals("-count")) {
count = Integer.parseInt(args[++i]);
} else if (args[i].equals("-codec")) {
codecClass = args[++i];
}
for (int i=0; i < args.length; ++i) { // parse command line
if (args[i] == null) {
continue;
} else if (args[i].equals("-count")) {
count = Integer.parseInt(args[++i]);
} else if (args[i].equals("-codec")) {
codecClass = args[++i];
}
Configuration conf = new Configuration();
int seed = 0;
codecTest(conf, seed, count, codecClass);
} catch (Exception e) {
System.err.println("Caught: " + e);
e.printStackTrace();
}
Configuration conf = new Configuration();
int seed = 0;
// Note that exceptions will propagate out.
codecTest(conf, seed, count, codecClass);
}
@Test

View File

@ -18,7 +18,6 @@
package org.apache.hadoop.net;
import java.util.*;
import java.net.UnknownHostException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
@ -60,19 +59,4 @@ public class StaticMapping extends Configured implements DNSToSwitchMapping {
return m;
}
}
public List<String> resolveValidHosts(List<String> names)
throws UnknownHostException {
List<String> m = new ArrayList<String>();
synchronized (nameToRackMap) {
for (String name : names) {
String rackId;
if ((rackId = nameToRackMap.get(name)) != null) {
m.add(rackId);
} else {
throw new UnknownHostException(name);
}
}
return m;
}
}
}

View File

@ -72,4 +72,20 @@ public class TestNetUtils {
assertNull(NetUtils.getLocalInetAddress("invalid-address-for-test"));
assertNull(NetUtils.getLocalInetAddress(null));
}
@Test(expected=UnknownHostException.class)
public void testVerifyHostnamesException() throws UnknownHostException {
String[] names = {"valid.host.com", "1.com", "invalid host here"};
NetUtils.verifyHostnames(names);
}
@Test
public void testVerifyHostnamesNoException() {
String[] names = {"valid.host.com", "1.com"};
try {
NetUtils.verifyHostnames(names);
} catch (UnknownHostException e) {
fail("NetUtils.verifyHostnames threw unexpected UnknownHostException");
}
}
}

View File

@ -19,7 +19,6 @@ package org.apache.hadoop.net;
import java.util.ArrayList;
import java.util.List;
import java.net.UnknownHostException;
import org.apache.hadoop.conf.Configuration;
@ -49,37 +48,5 @@ public class TestScriptBasedMapping extends TestCase {
List<String> result = mapping.resolve(names);
assertNull(result);
}
public void testResolveValidInvalidHostException() {
names = new ArrayList<String>();
names.add("1.com"); // Add invalid hostname that doesn't resolve
boolean exceptionThrown = false;
try {
mapping.resolveValidHosts(names);
} catch (UnknownHostException e) {
exceptionThrown = true;
}
assertTrue(
"resolveValidHosts did not throw UnknownHostException for invalid host",
exceptionThrown);
}
public void testResolveValidHostNoException() {
conf.setInt(ScriptBasedMapping.SCRIPT_ARG_COUNT_KEY,
ScriptBasedMapping.MIN_ALLOWABLE_ARGS);
conf.set(ScriptBasedMapping.SCRIPT_FILENAME_KEY, "echo");
mapping.setConf(conf);
names = new ArrayList<String>();
names.add("some.machine.name");
names.add("other.machine.name");
boolean exceptionThrown = false;
try {
mapping.resolveValidHosts(names);
} catch (UnknownHostException e) {
exceptionThrown = true;
}
assertFalse("resolveValidHosts threw Exception for valid host", exceptionThrown);
}
}


@ -111,6 +111,7 @@ public class TestDataChecksum {
} catch (ChecksumException ce) {
int expectedPos = checksum.getBytesPerChecksum() * (numSums - 1);
assertEquals(expectedPos, ce.getPos());
assertTrue(ce.getMessage().contains("fake file"));
}
}


@ -441,6 +441,11 @@
<artifactId>maven-source-plugin</artifactId>
<version>2.1.2</version>
</plugin>
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-deploy-plugin</artifactId>
<version>2.5</version>
</plugin>
</plugins>
</pluginManagement>
@ -457,9 +462,6 @@
<requireJavaVersion>
<version>1.6</version>
</requireJavaVersion>
<requireOS>
<family>unix</family>
</requireOS>
</rules>
</configuration>
<executions>
@ -502,7 +504,7 @@
<id>os.linux</id>
<activation>
<os>
<family>Linux</family>
<family>!Mac</family>
</os>
</activation>
<properties>


@ -629,6 +629,42 @@ Trunk (unreleased changes)
HDFS-2199. Move blockTokenSecretManager from FSNamesystem to BlockManager.
(Uma Maheswara Rao G via szetszwo)
HDFS-2187. Make EditLogInputStream act like an iterator over FSEditLogOps
(Ivan Kelly and todd via todd)
HDFS-2225. Refactor edit log file management so it's not in classes
which should be generic to the type of edit log storage. (Ivan Kelly
via todd)
HDFS-2108. Move datanode heartbeat handling from namenode package to
blockmanagement package. (szetszwo)
HDFS-2226. Clean up counting of operations in FSEditLogLoader (todd)
HDFS-2228. Move block and datanode code from FSNamesystem to
BlockManager and DatanodeManager. (szetszwo)
HDFS-2238. In NamenodeFsck.toString(), uses StringBuilder.(..) instead of
string concatenation. (Uma Maheswara Rao G via szetszwo)
HDFS-2230. ivy to resolve/retrieve latest common-tests jar published by
hadoop common maven build. (gkesavan)
HDFS-2227. getRemoteEditLogManifest should pull its information from
FileJournalManager during checkpoint process (Ivan Kelly and Todd Lipcon
via todd)
HDFS-2239. Reduce access levels of the fields and methods in FSNamesystem.
(szetszwo)
HDFS-2241. Remove implementing FSConstants interface to just get the
constants from the interface. (suresh)
HDFS-2237. Change UnderReplicatedBlocks from public to package private.
(szetszwo)
HDFS-2233. Add WebUI tests with URI reserved chars. (eli)
OPTIMIZATIONS
HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@ -917,6 +953,22 @@ Trunk (unreleased changes)
HDFS-2196. Make ant build system work with hadoop-common JAR generated
by Maven. (Alejandro Abdelnur via tomwhite)
HDFS-2245. Fix a NullPointerException in BlockManager.chooseTarget(..).
(szetszwo)
HDFS-2229. Fix a deadlock in namenode by enforcing lock acquisition
ordering. (szetszwo)
HDFS-2235. Encode servlet paths. (eli)
HDFS-2186. DN volume failures on startup are not counted. (eli)
HDFS-2240. Fix a deadlock in LeaseRenewer by enforcing lock acquisition
ordering. (szetszwo)
HDFS-73. DFSOutputStream does not close all the sockets.
(Uma Maheswara Rao G via eli)
BREAKDOWN OF HDFS-1073 SUBTASKS
HDFS-1521. Persist transaction ID on disk between NN restarts.


@ -180,7 +180,7 @@
<property name="hadoop-hdfs-test.pom" location="${ivy.dir}/hadoop-hdfs-test.xml"/>
<!--this is the naming policy for artifacts we want pulled down-->
<property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
<property name="ivy.artifact.retrieve.pattern" value="${ant.project.name}/[conf]/[artifact]-[revision](-[classifier]).[ext]"/>
<!--this is how artifacts that get built are named-->
<property name="ivy.publish.pattern" value="hadoop-hdfs-[revision].[ext]"/>
@ -247,8 +247,8 @@
<pathelement location="${build.tools}"/>
<pathelement path="${clover.jar}"/>
<path refid="ivy-test.classpath"/>
<fileset dir="${lib.dir}">
<include name="hadoop-common-test-${hadoop-common.version}.jar" />
<fileset dir="${test.ivy.lib.dir}">
<include name="hadoop-common-${hadoop-common.version}-tests.jar" />
<exclude name="**/excluded/" />
</fileset>
<pathelement location="${build.classes}"/>


@ -15,7 +15,7 @@
limitations under the License.
-->
<ivy-module version="1.0">
<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
<info organisation="org.apache.hadoop" module="${ant.project.name}" revision="${version}">
<license name="Apache 2.0"/>
<ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
@ -87,7 +87,9 @@
<dependency org="org.slf4j" name="slf4j-api" rev="${slf4j-api.version}" conf="test->master"/>
<dependency org="org.slf4j" name="slf4j-log4j12" rev="${slf4j-log4j12.version}" conf="test->master"/>
<dependency org="org.apache.hadoop" name="hadoop-common-test" rev="${hadoop-common.version}" conf="test->master"/>
<dependency org="org.apache.hadoop" name="hadoop-common" rev="${hadoop-common.version}" conf="test->master">
<artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests"/>
</dependency>
<dependency org="checkstyle" name="checkstyle" rev="${checkstyle.version}" conf="checkstyle->default"/>


@ -31,7 +31,8 @@
-->
<property name="repo.maven.org" value="http://repo1.maven.org/maven2/" override="false"/>
<property name="snapshot.apache.org" value="https://repository.apache.org/content/repositories/snapshots/" override="false"/>
<property name="maven2.pattern" value="[organisation]/[module]/[revision]/[module]-[revision]"/>
<property name="maven2.pattern" value="[organisation]/[module]/[revision]/[module]-[revision](-[classifier])"/>
<property name="repo.dir" value="${user.home}/.m2/repository"/>
<property name="maven2.pattern.ext" value="${maven2.pattern}.[ext]"/>
<property name="resolvers" value="default" override="false"/>
@ -41,10 +42,11 @@
<resolvers>
<ibiblio name="maven2" root="${repo.maven.org}" pattern="${maven2.pattern.ext}" m2compatible="true"/>
<ibiblio name="apache-snapshot" root="${snapshot.apache.org}" m2compatible="true"
checkmodified="true" changingPattern=".*SNAPSHOT"/>
checkmodified="true" changingPattern=".*SNAPSHOT" />
<filesystem name="fs" m2compatible="true" force="${force-resolve}">
<artifact pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].[ext]"/>
<artifact pattern="${repo.dir}/${maven2.pattern.ext}"/>
<ivy pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].pom"/>
</filesystem>


@ -41,7 +41,7 @@ hadoop-hdfs.version=0.23.0-SNAPSHOT
hsqldb.version=1.8.0.10
ivy.version=2.1.0
ivy.version=2.2.0-rc1
jasper.version=5.5.12
jdeb.version=0.8


@ -82,7 +82,7 @@
<!--this is the naming policy for artifacts we want pulled down-->
<property name="ivy.artifact.retrieve.pattern"
value="${ant.project.name}/[conf]/[artifact]-[revision].[ext]"/>
value="${ant.project.name}/[conf]/[artifact]-[revision](-[classifier]).[ext]"/>
<!-- the normal classpath -->
<path id="contrib-classpath">


@ -15,7 +15,7 @@
See the License for the specific language governing permissions and
limitations under the License.
-->
<ivy-module version="1.0">
<ivy-module version="1.0" xmlns:m="http://ant.apache.org/ivy/maven">
<info organisation="org.apache.hadoop" module="${ant.project.name}">
<license name="Apache 2.0"/>
<ivyauthor name="Apache Hadoop Team" url="http://hadoop.apache.org"/>
@ -45,13 +45,20 @@
rev="${hadoop-common.version}"
conf="common->default"/>
<dependency org="org.apache.hadoop"
name="hadoop-common-test"
name="hadoop-common"
rev="${hadoop-common.version}"
conf="common->default"/>
conf="common->default">
<artifact name="hadoop-common" type="tests" ext="jar" m:classifier="tests"/>
</dependency>
<dependency org="log4j"
name="log4j"
rev="${log4j.version}"
conf="common->master"/>
conf="common->master">
<exclude org="com.sun.jdmk"/>
<exclude org="com.sun.jmx"/>
<exclude org="javax.jms"/>
</dependency>
<dependency org="commons-logging"
name="commons-logging"
rev="${commons-logging.version}"


@ -61,6 +61,9 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@ -106,7 +109,7 @@ import org.apache.hadoop.util.Progressable;
*
********************************************************/
@InterfaceAudience.Private
public class DFSClient implements FSConstants, java.io.Closeable {
public class DFSClient implements java.io.Closeable {
public static final Log LOG = LogFactory.getLog(DFSClient.class);
public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
@ -165,7 +168,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
defaultBlockSize = conf.getLong(DFS_BLOCK_SIZE_KEY,
DEFAULT_BLOCK_SIZE);
DFS_BLOCK_SIZE_DEFAULT);
defaultReplication = (short) conf.getInt(
DFS_REPLICATION_KEY, DFS_REPLICATION_DEFAULT);
taskId = conf.get("mapreduce.task.attempt.id", "NONMAPREDUCE");
@ -1043,7 +1046,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
out = new DataOutputStream(
new BufferedOutputStream(NetUtils.getOutputStream(sock),
DataNode.SMALL_BUFFER_SIZE));
FSConstants.SMALL_BUFFER_SIZE));
in = new DataInputStream(NetUtils.getInputStream(sock));
if (LOG.isDebugEnabled()) {


@ -234,7 +234,7 @@ public class DFSConfigKeys extends CommonConfigurationKeys {
public static final String DFS_DF_INTERVAL_KEY = "dfs.df.interval";
public static final int DFS_DF_INTERVAL_DEFAULT = 60000;
public static final String DFS_BLOCKREPORT_INTERVAL_MSEC_KEY = "dfs.blockreport.intervalMsec";
public static final long DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 21600000;
public static final long DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT = 60 * 60 * 1000;
public static final String DFS_BLOCKREPORT_INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
public static final int DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT = 0;
public static final String DFS_BLOCK_INVALIDATE_LIMIT_KEY = "dfs.block.invalidate.limit";
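The two interval literals in this hunk differ by more than formatting: 21600000 ms is six hours, while 60 * 60 * 1000 ms is one hour. A quick standalone check of that arithmetic:

public class BlockReportIntervalCheck {
    public static void main(String[] args) {
        long sixHourLiteral = 21600000L;      // the numeric literal
        long spelledOut = 60L * 60 * 1000;    // the spelled-out form used in the patch
        System.out.println(sixHourLiteral / 3600000.0 + " hours");  // 6.0 hours
        System.out.println(spelledOut / 3600000.0 + " hours");      // 1.0 hours
    }
}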


@ -36,7 +36,6 @@ import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.atomic.AtomicBoolean;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CreateFlag;
import org.apache.hadoop.fs.FSOutputSummer;
import org.apache.hadoop.fs.FileAlreadyExistsException;
@ -48,6 +47,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.HdfsProtoUtil;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@ -62,7 +62,6 @@ import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseP
import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
import org.apache.hadoop.io.EnumSetWritable;
@ -167,7 +166,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
this.seqno = HEART_BEAT_SEQNO;
buffer = null;
int packetSize = PacketHeader.PKT_HEADER_LEN + DFSClient.SIZE_OF_INTEGER; // TODO(todd) strange
int packetSize = PacketHeader.PKT_HEADER_LEN + FSConstants.BYTES_IN_INTEGER;
buf = new byte[packetSize];
checksumStart = dataStart = packetSize;
@ -235,12 +234,12 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
dataStart - checksumLen , checksumLen);
}
int pktLen = DFSClient.SIZE_OF_INTEGER + dataLen + checksumLen;
int pktLen = FSConstants.BYTES_IN_INTEGER + dataLen + checksumLen;
//normally dataStart == checksumPos, i.e., offset is zero.
buffer = ByteBuffer.wrap(
buf, dataStart - checksumPos,
PacketHeader.PKT_HEADER_LEN + pktLen - DFSClient.SIZE_OF_INTEGER);
PacketHeader.PKT_HEADER_LEN + pktLen - FSConstants.BYTES_IN_INTEGER);
buf = null;
buffer.mark();
@ -605,6 +604,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
try {
blockStream.close();
} catch (IOException e) {
setLastException(e);
} finally {
blockStream = null;
}
@ -613,10 +613,20 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
try {
blockReplyStream.close();
} catch (IOException e) {
setLastException(e);
} finally {
blockReplyStream = null;
}
}
if (null != s) {
try {
s.close();
} catch (IOException e) {
setLastException(e);
} finally {
s = null;
}
}
}
//
@ -839,7 +849,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
final long writeTimeout = dfsClient.getDatanodeWriteTimeout(2);
out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(sock, writeTimeout),
DataNode.SMALL_BUFFER_SIZE));
FSConstants.SMALL_BUFFER_SIZE));
//send the TRANSFER_BLOCK request
new Sender(out).transferBlock(block, blockToken, dfsClient.clientName,
@ -1002,16 +1012,20 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
persistBlocks.set(true);
boolean result = false;
DataOutputStream out = null;
try {
assert null == s : "Previous socket unclosed";
s = createSocketForPipeline(nodes[0], nodes.length, dfsClient);
long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
//
// Xmit header info to datanode
//
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
out = new DataOutputStream(new BufferedOutputStream(
NetUtils.getOutputStream(s, writeTimeout),
DataNode.SMALL_BUFFER_SIZE));
FSConstants.SMALL_BUFFER_SIZE));
assert null == blockReplyStream : "Previous blockReplyStream unclosed";
blockReplyStream = new DataInputStream(NetUtils.getInputStream(s));
// send the request
@ -1037,7 +1051,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
+ firstBadLink);
}
}
assert null == blockStream : "Previous blockStream unclosed";
blockStream = out;
result = true; // success
@ -1058,12 +1072,15 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
}
hasError = true;
setLastException(ie);
blockReplyStream = null;
result = false; // error
} finally {
if (!result) {
IOUtils.closeSocket(s);
s = null;
IOUtils.closeStream(out);
out = null;
IOUtils.closeStream(blockReplyStream);
blockReplyStream = null;
}
}
return result;
@ -1156,7 +1173,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
final int timeout = client.getDatanodeReadTimeout(length);
NetUtils.connect(sock, isa, timeout);
sock.setSoTimeout(timeout);
sock.setSendBufferSize(DFSClient.DEFAULT_DATA_SOCKET_SIZE);
sock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
if(DFSClient.LOG.isDebugEnabled()) {
DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
}
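The cleanup added in this file follows one idiom throughout: close each stream or socket in its own try/finally, record the failure (setLastException), and null the field so nothing is closed or reused twice. A minimal standalone sketch of that pattern with hypothetical names, not the real DFSOutputStream fields:

import java.io.Closeable;
import java.io.IOException;

class QuietCloser {
    private IOException lastException;

    // Close a resource, remember the first failure, and hand back null so the caller can clear its field.
    <T extends Closeable> T closeAndClear(T resource) {
        if (resource != null) {
            try {
                resource.close();
            } catch (IOException e) {
                if (lastException == null) {
                    lastException = e;   // analogous in spirit to setLastException(e)
                }
            }
        }
        return null;   // usage: blockStream = closer.closeAndClear(blockStream);
    }

    IOException getLastException() {
        return lastException;
    }
}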


@ -61,6 +61,7 @@ import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.security.token.TokenIdentifier;
import org.apache.hadoop.util.Progressable;
import org.apache.hadoop.util.ServletUtil;
import org.xml.sax.Attributes;
import org.xml.sax.InputSource;
import org.xml.sax.SAXException;
@ -241,18 +242,6 @@ public class HftpFileSystem extends FileSystem {
}
}
/**
* Return a URL pointing to given path on the namenode.
*
* @param p path to obtain the URL for
* @return namenode URL referring to the given path
* @throws IOException on error constructing the URL
*/
URL getNamenodeFileURL(Path p) throws IOException {
return getNamenodeURL("/data" + p.toUri().getPath(),
"ugi=" + getUgiParameter());
}
/**
* Return a URL pointing to given path on the namenode.
*
@ -262,28 +251,25 @@ public class HftpFileSystem extends FileSystem {
* @throws IOException on error constructing the URL
*/
URL getNamenodeURL(String path, String query) throws IOException {
try {
final URL url = new URI("http", null, nnAddr.getHostName(),
nnAddr.getPort(), path, query, null).toURL();
if (LOG.isTraceEnabled()) {
LOG.trace("url=" + url);
}
return url;
} catch (URISyntaxException e) {
throw new IOException(e);
final URL url = new URL("http", nnAddr.getHostName(),
nnAddr.getPort(), path + '?' + query);
if (LOG.isTraceEnabled()) {
LOG.trace("url=" + url);
}
return url;
}
/**
* ugi parameter for http connection
* Get encoded UGI parameter string for a URL.
*
* @return user_shortname,group1,group2...
*/
private String getUgiParameter() {
StringBuilder ugiParamenter = new StringBuilder(ugi.getShortUserName());
private String getEncodedUgiParameter() {
StringBuilder ugiParamenter = new StringBuilder(
ServletUtil.encodeQueryValue(ugi.getShortUserName()));
for(String g: ugi.getGroupNames()) {
ugiParamenter.append(",");
ugiParamenter.append(g);
ugiParamenter.append(ServletUtil.encodeQueryValue(g));
}
return ugiParamenter.toString();
}
@ -304,7 +290,7 @@ public class HftpFileSystem extends FileSystem {
*/
protected HttpURLConnection openConnection(String path, String query)
throws IOException {
query = updateQuery(query);
query = addDelegationTokenParam(query);
final URL url = getNamenodeURL(path, query);
final HttpURLConnection connection = (HttpURLConnection)url.openConnection();
try {
@ -316,14 +302,14 @@ public class HftpFileSystem extends FileSystem {
return connection;
}
protected String updateQuery(String query) throws IOException {
protected String addDelegationTokenParam(String query) throws IOException {
String tokenString = null;
if (UserGroupInformation.isSecurityEnabled()) {
synchronized (this) {
if (delegationToken != null) {
tokenString = delegationToken.encodeToUrlString();
return (query + JspHelper.getDelegationTokenUrlParam(tokenString));
} // else we are talking to an insecure cluster
}
}
}
return query;
@ -331,9 +317,9 @@ public class HftpFileSystem extends FileSystem {
@Override
public FSDataInputStream open(Path f, int buffersize) throws IOException {
String query = "ugi=" + getUgiParameter();
query = updateQuery(query);
URL u = getNamenodeURL("/data" + f.toUri().getPath(), query);
String path = "/data" + ServletUtil.encodePath(f.toUri().getPath());
String query = addDelegationTokenParam("ugi=" + getEncodedUgiParameter());
URL u = getNamenodeURL(path, query);
return new FSDataInputStream(new ByteRangeInputStream(u));
}
@ -382,9 +368,9 @@ public class HftpFileSystem extends FileSystem {
try {
XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
HttpURLConnection connection = openConnection("/listPaths" + path,
"ugi=" + getUgiParameter() + (recur? "&recursive=yes" : ""));
HttpURLConnection connection = openConnection(
"/listPaths" + ServletUtil.encodePath(path),
"ugi=" + getEncodedUgiParameter() + (recur ? "&recursive=yes" : ""));
InputStream resp = connection.getInputStream();
xr.parse(new InputSource(resp));
} catch(SAXException e) {
@ -447,7 +433,8 @@ public class HftpFileSystem extends FileSystem {
private FileChecksum getFileChecksum(String f) throws IOException {
final HttpURLConnection connection = openConnection(
"/fileChecksum" + f, "ugi=" + getUgiParameter());
"/fileChecksum" + ServletUtil.encodePath(f),
"ugi=" + getEncodedUgiParameter());
try {
final XMLReader xr = XMLReaderFactory.createXMLReader();
xr.setContentHandler(this);
@ -534,7 +521,8 @@ public class HftpFileSystem extends FileSystem {
*/
private ContentSummary getContentSummary(String path) throws IOException {
final HttpURLConnection connection = openConnection(
"/contentSummary" + path, "ugi=" + getUgiParameter());
"/contentSummary" + ServletUtil.encodePath(path),
"ugi=" + getEncodedUgiParameter());
InputStream in = null;
try {
in = connection.getInputStream();
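Routing every path and UGI value through the encode helpers keeps reserved characters (spaces, '&', '?') from corrupting the request URL. A standalone illustration of the idea using java.net.URLEncoder; this is only a sketch of query-value encoding, not the ServletUtil behavior the patch actually relies on:

import java.io.UnsupportedEncodingException;
import java.net.URLEncoder;

public class UgiQueryDemo {
    // Build a "ugi=user,group1,group2" parameter with each value percent-encoded (form encoding).
    static String ugiParam(String user, String... groups) throws UnsupportedEncodingException {
        StringBuilder sb = new StringBuilder(URLEncoder.encode(user, "UTF-8"));
        for (String g : groups) {
            sb.append(',').append(URLEncoder.encode(g, "UTF-8"));
        }
        return "ugi=" + sb;
    }

    public static void main(String[] args) throws UnsupportedEncodingException {
        // A space or '&' in a group name no longer breaks the query string.
        System.out.println(ugiParam("alice", "dev team", "ops&admin"));
        // -> ugi=alice,dev+team,ops%26admin
    }
}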


@ -123,42 +123,42 @@ public class HsftpFileSystem extends HftpFileSystem {
@Override
protected HttpURLConnection openConnection(String path, String query)
throws IOException {
query = addDelegationTokenParam(query);
final URL url = new URL("https", nnAddr.getHostName(),
nnAddr.getPort(), path + '?' + query);
HttpsURLConnection conn = (HttpsURLConnection)url.openConnection();
// bypass hostname verification
try {
query = updateQuery(query);
final URL url = new URI("https", null, nnAddr.getHostName(), nnAddr
.getPort(), path, query, null).toURL();
HttpsURLConnection conn = (HttpsURLConnection) url.openConnection();
// bypass hostname verification
conn.setHostnameVerifier(new DummyHostnameVerifier());
conn.setRequestMethod("GET");
conn.connect();
} catch (IOException ioe) {
throwIOExceptionFromConnection(conn, ioe);
}
// check cert expiration date
final int warnDays = ExpWarnDays;
if (warnDays > 0) { // make sure only check once
ExpWarnDays = 0;
long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY
+ System.currentTimeMillis();
X509Certificate[] clientCerts = (X509Certificate[]) conn
.getLocalCertificates();
if (clientCerts != null) {
for (X509Certificate cert : clientCerts) {
long expTime = cert.getNotAfter().getTime();
if (expTime < expTimeThreshold) {
StringBuilder sb = new StringBuilder();
sb.append("\n Client certificate "
+ cert.getSubjectX500Principal().getName());
int dayOffSet = (int) ((expTime - System.currentTimeMillis()) / MM_SECONDS_PER_DAY);
sb.append(" have " + dayOffSet + " days to expire");
LOG.warn(sb.toString());
}
// check cert expiration date
final int warnDays = ExpWarnDays;
if (warnDays > 0) { // make sure only check once
ExpWarnDays = 0;
long expTimeThreshold = warnDays * MM_SECONDS_PER_DAY
+ System.currentTimeMillis();
X509Certificate[] clientCerts = (X509Certificate[]) conn
.getLocalCertificates();
if (clientCerts != null) {
for (X509Certificate cert : clientCerts) {
long expTime = cert.getNotAfter().getTime();
if (expTime < expTimeThreshold) {
StringBuilder sb = new StringBuilder();
sb.append("\n Client certificate "
+ cert.getSubjectX500Principal().getName());
int dayOffSet = (int) ((expTime - System.currentTimeMillis()) / MM_SECONDS_PER_DAY);
sb.append(" have " + dayOffSet + " days to expire");
LOG.warn(sb.toString());
}
}
}
return (HttpURLConnection) conn;
} catch (URISyntaxException e) {
throw (IOException) new IOException().initCause(e);
}
return (HttpURLConnection) conn;
}
@Override


@ -75,7 +75,9 @@ class LeaseRenewer {
/** Get a {@link LeaseRenewer} instance */
static LeaseRenewer getInstance(final String authority,
final UserGroupInformation ugi, final DFSClient dfsc) throws IOException {
return Factory.INSTANCE.get(authority, ugi, dfsc);
final LeaseRenewer r = Factory.INSTANCE.get(authority, ugi);
r.addClient(dfsc);
return r;
}
/**
@ -132,14 +134,13 @@ class LeaseRenewer {
/** Get a renewer. */
private synchronized LeaseRenewer get(final String authority,
final UserGroupInformation ugi, final DFSClient dfsc) {
final UserGroupInformation ugi) {
final Key k = new Key(authority, ugi);
LeaseRenewer r = renewers.get(k);
if (r == null) {
r = new LeaseRenewer(k);
renewers.put(k, r);
}
r.addClient(dfsc);
return r;
}
@ -196,7 +197,7 @@ class LeaseRenewer {
private LeaseRenewer(Factory.Key factorykey) {
this.factorykey = factorykey;
setGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
unsyncSetGraceSleepPeriod(LEASE_RENEWER_GRACE_DEFAULT);
if (LOG.isTraceEnabled()) {
instantiationTrace = StringUtils.stringifyException(
@ -251,6 +252,10 @@ class LeaseRenewer {
/** Set the grace period and adjust the sleep period accordingly. */
synchronized void setGraceSleepPeriod(final long gracePeriod) {
unsyncSetGraceSleepPeriod(gracePeriod);
}
private void unsyncSetGraceSleepPeriod(final long gracePeriod) {
if (gracePeriod < 100L) {
throw new HadoopIllegalArgumentException(gracePeriod
+ " = gracePeriod < 100ms is too small.");


@ -565,7 +565,6 @@ public interface ClientProtocol extends VersionedProtocol {
* <li> [3] contains number of under replicated blocks in the system.</li>
* <li> [4] contains number of blocks with a corrupt replica. </li>
* <li> [5] contains number of blocks without any good replicas left. </li>
* <li> [5] contains number of blocks without any good replicas left. </li>
* <li> [6] contains the total used space of the block pool. </li>
* </ul>
* Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of


@ -18,68 +18,71 @@
package org.apache.hadoop.hdfs.protocol;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
/************************************
* Some handy constants
*
*
************************************/
@InterfaceAudience.Private
public interface FSConstants {
public final class FSConstants {
/* Hidden constructor */
private FSConstants() {
}
public static int MIN_BLOCKS_FOR_WRITE = 5;
// Long that indicates "leave current quota unchanged"
public static final long QUOTA_DONT_SET = Long.MAX_VALUE;
public static final long QUOTA_RESET = -1L;
//
// Timeouts, constants
//
public static long HEARTBEAT_INTERVAL = 3;
public static long BLOCKREPORT_INTERVAL = 60 * 60 * 1000;
public static long BLOCKREPORT_INITIAL_DELAY = 0;
public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
public static final long LEASE_RECOVER_PERIOD = 10 * 1000; //in ms
// We need to limit the length and depth of a path in the filesystem. HADOOP-438
// Currently we set the maximum length to 8k characters and the maximum depth to 1k.
public static final long LEASE_RECOVER_PERIOD = 10 * 1000; // in ms
// We need to limit the length and depth of a path in the filesystem.
// HADOOP-438
// Currently we set the maximum length to 8k characters and the maximum depth
// to 1k.
public static int MAX_PATH_LENGTH = 8000;
public static int MAX_PATH_DEPTH = 1000;
public static final int BUFFER_SIZE = new HdfsConfiguration().getInt("io.file.buffer.size", 4096);
//Used for writing header etc.
public static final int SMALL_BUFFER_SIZE = Math.min(BUFFER_SIZE/2, 512);
//TODO mb@media-style.com: should be conf injected?
public static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
public static final int DEFAULT_BYTES_PER_CHECKSUM = 512;
public static final int DEFAULT_WRITE_PACKET_SIZE = 64 * 1024;
public static final short DEFAULT_REPLICATION_FACTOR = 3;
public static final int DEFAULT_FILE_BUFFER_SIZE = 4096;
public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
public static final int SIZE_OF_INTEGER = Integer.SIZE / Byte.SIZE;
// TODO mb@media-style.com: should be conf injected?
public static final int DEFAULT_DATA_SOCKET_SIZE = 128 * 1024;
public static final int IO_FILE_BUFFER_SIZE = new HdfsConfiguration().getInt(
DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
// Used for writing header etc.
public static final int SMALL_BUFFER_SIZE = Math.min(IO_FILE_BUFFER_SIZE / 2,
512);
public static final int BYTES_IN_INTEGER = Integer.SIZE / Byte.SIZE;
// SafeMode actions
public enum SafeModeAction{ SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET; }
public enum SafeModeAction {
SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET;
}
// type of the datanode report
public static enum DatanodeReportType {ALL, LIVE, DEAD }
public static enum DatanodeReportType {
ALL, LIVE, DEAD
}
// An invalid transaction ID that will never be seen in a real namesystem.
public static final long INVALID_TXID = -12345;
/**
* Distributed upgrade actions:
*
* 1. Get upgrade status.
* 2. Get detailed upgrade status.
* 3. Proceed with the upgrade if it is stuck, no matter what the status is.
* 1. Get upgrade status. 2. Get detailed upgrade status. 3. Proceed with the
* upgrade if it is stuck, no matter what the status is.
*/
public static enum UpgradeAction {
GET_STATUS,
DETAILED_STATUS,
FORCE_PROCEED;
GET_STATUS, DETAILED_STATUS, FORCE_PROCEED;
}
/**
@ -90,6 +93,6 @@ public interface FSConstants {
/**
* Please see {@link LayoutVersion} on adding new layout version.
*/
public static final int LAYOUT_VERSION =
LayoutVersion.getCurrentLayoutVersion();
public static final int LAYOUT_VERSION = LayoutVersion
.getCurrentLayoutVersion();
}
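Turning FSConstants from an interface into a final class with a hidden constructor drops the constant-interface pattern: classes such as DFSClient stop implementing it and reference the values by class name (or static import) instead. A minimal sketch of the before/after shape, using hypothetical names:

// Before (constant-interface style): the constants leak into the implementer's public type.
// class SomeClient implements SomeConstants { int n = SMALL_BUFFER_SIZE; }

// After: constants live in a final, non-instantiable holder and are referenced explicitly.
final class SomeConstants {
    private SomeConstants() {}                       // hidden constructor, as in the patch
    static final int SMALL_BUFFER_SIZE = 512;
}

class SomeClient {
    int bufferSize() {
        return SomeConstants.SMALL_BUFFER_SIZE;      // or via a static import
    }
}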


@ -309,10 +309,10 @@ public class Balancer {
target.datanode.getName()), HdfsConstants.READ_TIMEOUT);
sock.setKeepAlive(true);
out = new DataOutputStream( new BufferedOutputStream(
sock.getOutputStream(), FSConstants.BUFFER_SIZE));
sock.getOutputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
sendRequest(out);
in = new DataInputStream( new BufferedInputStream(
sock.getInputStream(), FSConstants.BUFFER_SIZE));
sock.getInputStream(), FSConstants.IO_FILE_BUFFER_SIZE));
receiveResponse(in);
bytesMoved.inc(block.getNumBytes());
LOG.info( "Moving block " + block.getBlock().getBlockId() +


@ -259,26 +259,6 @@ public class BlockInfo extends Block implements LightWeightGSet.LinkedElement {
return head;
}
boolean listIsConsistent(DatanodeDescriptor dn) {
// going forward
int count = 0;
BlockInfo next, nextPrev;
BlockInfo cur = this;
while(cur != null) {
next = cur.getNext(cur.findDatanode(dn));
if(next != null) {
nextPrev = next.getPrevious(next.findDatanode(dn));
if(cur != nextPrev) {
System.out.println("Inconsistent list: cur->next->prev != cur");
return false;
}
}
cur = next;
count++;
}
return true;
}
/**
* BlockInfo represents a block that is not being constructed.
* In order to start modifying the block, the BlockInfo should be converted


@ -34,7 +34,6 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.namenode.FSClusterStats;
import org.apache.hadoop.hdfs.server.namenode.FSInodeInfo;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.Node;
import org.apache.hadoop.net.NodeBase;


@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.util.LightWeightGSet;
* block's metadata currently includes INode it belongs to and
* the datanodes that store the block.
*/
public class BlocksMap {
class BlocksMap {
private static class NodeIterator implements Iterator<DatanodeDescriptor> {
private BlockInfo blockInfo;
private int nextIdx = 0;
@ -101,7 +101,7 @@ public class BlocksMap {
/**
* Add block b belonging to the specified file inode to the map.
*/
public BlockInfo addINode(BlockInfo b, INodeFile iNode) {
BlockInfo addINode(BlockInfo b, INodeFile iNode) {
BlockInfo info = blocks.get(b);
if (info != b) {
info = b;
@ -137,7 +137,7 @@ public class BlocksMap {
* Searches for the block in the BlocksMap and
* returns Iterator that iterates through the nodes the block belongs to.
*/
public Iterator<DatanodeDescriptor> nodeIterator(Block b) {
Iterator<DatanodeDescriptor> nodeIterator(Block b) {
return nodeIterator(blocks.get(b));
}
@ -182,27 +182,6 @@ public class BlocksMap {
Iterable<BlockInfo> getBlocks() {
return blocks;
}
/**
* Check if the block exists in map
*/
public boolean contains(Block block) {
return blocks.contains(block);
}
/**
* Check if the replica at the given datanode exists in map
*/
boolean contains(Block block, DatanodeDescriptor datanode) {
BlockInfo info = blocks.get(block);
if (info == null)
return false;
if (-1 == info.findDatanode(datanode))
return false;
return true;
}
/** Get the capacity of the HashMap that stores blocks */
int getCapacity() {


@ -17,6 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import java.io.IOException;
import java.io.PrintWriter;
import java.net.InetAddress;
@ -32,6 +34,7 @@ import java.util.TreeMap;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.HadoopIllegalArgumentException;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
@ -49,6 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.BlockTar
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand;
import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@ -56,13 +60,12 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeCommand;
import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException;
import org.apache.hadoop.hdfs.server.protocol.BalancerBandwidthCommand;
import org.apache.hadoop.hdfs.util.CyclicIteration;
import org.apache.hadoop.ipc.Server;
import org.apache.hadoop.net.CachedDNSToSwitchMapping;
import org.apache.hadoop.net.DNSToSwitchMapping;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.ScriptBasedMapping;
import org.apache.hadoop.util.CyclicIteration;
import org.apache.hadoop.util.Daemon;
import org.apache.hadoop.util.HostsFileReader;
import org.apache.hadoop.util.ReflectionUtils;
@ -75,7 +78,10 @@ import org.apache.hadoop.util.ReflectionUtils;
public class DatanodeManager {
static final Log LOG = LogFactory.getLog(DatanodeManager.class);
final FSNamesystem namesystem;
private final FSNamesystem namesystem;
private final BlockManager blockManager;
private final HeartbeatManager heartbeatManager;
/**
* Stores the datanode -> block map.
@ -117,9 +123,14 @@ public class DatanodeManager {
/** Ask Datanode only up to this many blocks to delete. */
final int blockInvalidateLimit;
DatanodeManager(final FSNamesystem namesystem, final Configuration conf
DatanodeManager(final BlockManager blockManager,
final FSNamesystem namesystem, final Configuration conf
) throws IOException {
this.namesystem = namesystem;
this.blockManager = blockManager;
this.heartbeatManager = new HeartbeatManager(namesystem, conf);
this.hostsReader = new HostsFileReader(
conf.get(DFSConfigKeys.DFS_HOSTS, ""),
conf.get(DFSConfigKeys.DFS_HOSTS_EXCLUDE, ""));
@ -158,17 +169,30 @@ public class DatanodeManager {
conf.getInt(DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_DECOMMISSION_NODES_PER_INTERVAL_DEFAULT)));
decommissionthread.start();
heartbeatManager.activate(conf);
}
void close() {
if (decommissionthread != null) decommissionthread.interrupt();
heartbeatManager.close();
}
/** @return the network topology. */
public NetworkTopology getNetworkTopology() {
return networktopology;
}
/** @return the heartbeat manager. */
HeartbeatManager getHeartbeatManager() {
return heartbeatManager;
}
/** @return the datanode statistics. */
public DatanodeStatistics getDatanodeStatistics() {
return heartbeatManager;
}
/** Sort the located blocks by the distance to the target host. */
public void sortLocatedBlocks(final String targethost,
final List<LocatedBlock> locatedblocks) {
@ -231,9 +255,44 @@ public class DatanodeManager {
}
}
/**
* Remove a datanode descriptor.
* @param nodeInfo datanode descriptor.
*/
private void removeDatanode(DatanodeDescriptor nodeInfo) {
assert namesystem.hasWriteLock();
heartbeatManager.removeDatanode(nodeInfo);
blockManager.removeBlocksAssociatedTo(nodeInfo);
networktopology.remove(nodeInfo);
if (LOG.isDebugEnabled()) {
LOG.debug("remove datanode " + nodeInfo.getName());
}
namesystem.checkSafeMode();
}
/**
* Remove a datanode
* @throws UnregisteredNodeException
*/
public void removeDatanode(final DatanodeID node
) throws UnregisteredNodeException {
namesystem.writeLock();
try {
final DatanodeDescriptor descriptor = getDatanode(node);
if (descriptor != null) {
removeDatanode(descriptor);
} else {
NameNode.stateChangeLog.warn("BLOCK* removeDatanode: "
+ node.getName() + " does not exist");
}
} finally {
namesystem.writeUnlock();
}
}
/** Remove a dead datanode. */
public void removeDeadDatanode(final DatanodeID nodeID) {
synchronized(namesystem.heartbeats) {
void removeDeadDatanode(final DatanodeID nodeID) {
synchronized(datanodeMap) {
DatanodeDescriptor d;
try {
@ -244,14 +303,13 @@ public class DatanodeManager {
if (d != null && isDatanodeDead(d)) {
NameNode.stateChangeLog.info(
"BLOCK* removeDeadDatanode: lost heartbeat from " + d.getName());
namesystem.removeDatanode(d);
removeDatanode(d);
}
}
}
}
/** Is the datanode dead? */
public boolean isDatanodeDead(DatanodeDescriptor node) {
boolean isDatanodeDead(DatanodeDescriptor node) {
return (node.getLastUpdate() <
(Util.now() - heartbeatExpireInterval));
}
@ -348,7 +406,7 @@ public class DatanodeManager {
* @param nodeList
* , array list of live or dead nodes.
*/
public void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
private void removeDecomNodeFromList(final List<DatanodeDescriptor> nodeList) {
// If the include list is empty, any nodes are welcomed and it does not
// make sense to exclude any nodes from the cluster. Therefore, no remove.
if (hostsReader.getHosts().isEmpty()) {
@ -423,11 +481,48 @@ public class DatanodeManager {
throws IOException {
// If the registered node is in exclude list, then decommission it
if (inExcludedHostsList(nodeReg, ipAddr)) {
namesystem.getBlockManager().startDecommission(nodeReg);
startDecommission(nodeReg);
}
}
/**
* Change, if appropriate, the admin state of a datanode to
* decommission completed. Return true if decommission is complete.
*/
boolean checkDecommissionState(DatanodeDescriptor node) {
// Check to see if all blocks in this decommissioned
// node has reached their target replication factor.
if (node.isDecommissionInProgress()) {
if (!blockManager.isReplicationInProgress(node)) {
node.setDecommissioned();
LOG.info("Decommission complete for node " + node.getName());
}
}
return node.isDecommissioned();
}
/** Start decommissioning the specified datanode. */
private void startDecommission(DatanodeDescriptor node) throws IOException {
if (!node.isDecommissionInProgress() && !node.isDecommissioned()) {
LOG.info("Start Decommissioning node " + node.getName() + " with " +
node.numBlocks() + " blocks.");
heartbeatManager.startDecommission(node);
node.decommissioningStatus.setStartTime(now());
// all the blocks that reside on this node have to be replicated.
checkDecommissionState(node);
}
}
/** Stop decommissioning the specified datanodes. */
void stopDecommission(DatanodeDescriptor node) throws IOException {
if (node.isDecommissionInProgress() || node.isDecommissioned()) {
LOG.info("Stop Decommissioning node " + node.getName());
heartbeatManager.stopDecommission(node);
blockManager.processOverReplicatedBlocksOnReCommission(node);
}
}
/**
* Generate new storage ID.
*
@ -469,7 +564,7 @@ public class DatanodeManager {
nodeReg.getInfoPort(),
nodeReg.getIpcPort());
nodeReg.updateRegInfo(dnReg);
nodeReg.exportedKeys = namesystem.getBlockManager().getBlockKeys();
nodeReg.exportedKeys = blockManager.getBlockKeys();
NameNode.stateChangeLog.info("BLOCK* NameSystem.registerDatanode: "
+ "node registration from " + nodeReg.getName()
@ -483,7 +578,7 @@ public class DatanodeManager {
+ "node from name: " + nodeN.getName());
// nodeN previously served a different data storage,
// which is not served by anybody anymore.
namesystem.removeDatanode(nodeN);
removeDatanode(nodeN);
// physically remove node from datanodeMap
wipeDatanode(nodeN);
nodeN = null;
@ -525,14 +620,7 @@ public class DatanodeManager {
getNetworkTopology().add(nodeS);
// also treat the registration message as a heartbeat
synchronized(namesystem.heartbeats) {
if( !namesystem.heartbeats.contains(nodeS)) {
namesystem.heartbeats.add(nodeS);
//update its timestamp
nodeS.updateHeartbeat(0L, 0L, 0L, 0L, 0, 0);
nodeS.isAlive = true;
}
}
heartbeatManager.register(nodeS);
checkDecommissioning(nodeS, dnAddress);
return;
}
@ -556,16 +644,29 @@ public class DatanodeManager {
checkDecommissioning(nodeDescr, dnAddress);
// also treat the registration message as a heartbeat
synchronized(namesystem.heartbeats) {
namesystem.heartbeats.add(nodeDescr);
nodeDescr.isAlive = true;
// no need to update its timestamp
// because its is done when the descriptor is created
// no need to update its timestamp
// because its is done when the descriptor is created
heartbeatManager.addDatanode(nodeDescr);
}
/**
* Rereads conf to get hosts and exclude list file names.
* Rereads the files to update the hosts and exclude lists. It
* checks if any of the hosts have changed states:
*/
public void refreshNodes(final Configuration conf) throws IOException {
namesystem.checkSuperuserPrivilege();
refreshHostsReader(conf);
namesystem.writeLock();
try {
refreshDatanodes();
} finally {
namesystem.writeUnlock();
}
}
/** Reread include/exclude files. */
public void refreshHostsReader(Configuration conf) throws IOException {
private void refreshHostsReader(Configuration conf) throws IOException {
// Reread the conf to get dfs.hosts and dfs.hosts.exclude filenames.
// Update the file names and refresh internal includes and excludes list.
if (conf == null) {
@ -577,24 +678,21 @@ public class DatanodeManager {
}
/**
* Rereads the config to get hosts and exclude list file names.
* Rereads the files to update the hosts and exclude lists. It
* checks if any of the hosts have changed states:
* 1. Added to hosts --> no further work needed here.
* 2. Removed from hosts --> mark AdminState as decommissioned.
* 3. Added to exclude --> start decommission.
* 4. Removed from exclude --> stop decommission.
*/
public void refreshDatanodes() throws IOException {
private void refreshDatanodes() throws IOException {
for(DatanodeDescriptor node : datanodeMap.values()) {
// Check if not include.
if (!inHostsList(node, null)) {
node.setDisallowed(true); // case 2.
node.setDisallowed(true); // case 2.
} else {
if (inExcludedHostsList(node, null)) {
namesystem.getBlockManager().startDecommission(node); // case 3.
startDecommission(node); // case 3.
} else {
namesystem.getBlockManager().stopDecommission(node); // case 4.
stopDecommission(node); // case 4.
}
}
}
@ -626,16 +724,59 @@ public class DatanodeManager {
return numDead;
}
/** @return list of datanodes where decommissioning is in progress. */
public List<DatanodeDescriptor> getDecommissioningNodes() {
namesystem.readLock();
try {
final List<DatanodeDescriptor> decommissioningNodes
= new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> results = getDatanodeListForReport(
DatanodeReportType.LIVE);
for(DatanodeDescriptor node : results) {
if (node.isDecommissionInProgress()) {
decommissioningNodes.add(node);
}
}
return decommissioningNodes;
} finally {
namesystem.readUnlock();
}
}
/** Fetch live and dead datanodes. */
public void fetchDatanodess(final List<DatanodeDescriptor> live,
final List<DatanodeDescriptor> dead) {
final List<DatanodeDescriptor> results =
getDatanodeListForReport(DatanodeReportType.ALL);
for(DatanodeDescriptor node : results) {
if (isDatanodeDead(node))
dead.add(node);
else
live.add(node);
public void fetchDatanodes(final List<DatanodeDescriptor> live,
final List<DatanodeDescriptor> dead, final boolean removeDecommissionNode) {
if (live == null && dead == null) {
throw new HadoopIllegalArgumentException("Both live and dead lists are null");
}
namesystem.readLock();
try {
final List<DatanodeDescriptor> results =
getDatanodeListForReport(DatanodeReportType.ALL);
for(DatanodeDescriptor node : results) {
if (isDatanodeDead(node)) {
if (dead != null) {
dead.add(node);
}
} else {
if (live != null) {
live.add(node);
}
}
}
} finally {
namesystem.readUnlock();
}
if (removeDecommissionNode) {
if (live != null) {
removeDecomNodeFromList(live);
}
if (dead != null) {
removeDecomNodeFromList(dead);
}
}
}
@ -712,7 +853,7 @@ public class DatanodeManager {
long capacity, long dfsUsed, long remaining, long blockPoolUsed,
int xceiverCount, int maxTransfers, int failedVolumes
) throws IOException {
synchronized (namesystem.heartbeats) {
synchronized (heartbeatManager) {
synchronized (datanodeMap) {
DatanodeDescriptor nodeinfo = null;
try {
@ -731,10 +872,8 @@ public class DatanodeManager {
return new DatanodeCommand[]{DatanodeCommand.REGISTER};
}
namesystem.updateStats(nodeinfo, false);
nodeinfo.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
xceiverCount, failedVolumes);
namesystem.updateStats(nodeinfo, true);
heartbeatManager.updateHeartbeat(nodeinfo, capacity, dfsUsed,
remaining, blockPoolUsed, xceiverCount, failedVolumes);
//check lease recovery
BlockInfoUnderConstruction[] blocks = nodeinfo
@ -765,7 +904,7 @@ public class DatanodeManager {
blockPoolId, blks));
}
namesystem.addKeyUpdateCommand(cmds, nodeinfo);
blockManager.addKeyUpdateCommand(cmds, nodeinfo);
// check for balancer bandwidth update
if (nodeinfo.getBalancerBandwidth() > 0) {
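One behavioral detail of the reworked node listing above: fetchDatanodes accepts null for whichever of the live or dead lists the caller does not want, and rejects null for both. A small standalone sketch of that contract with toy types, not the HDFS classes:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

class NodeLister {
    // Mirrors the null-means-skip contract of fetchDatanodes (illustration only).
    static void fetch(List<String> live, List<String> dead, List<String> all, Set<String> deadSet) {
        if (live == null && dead == null) {
            throw new IllegalArgumentException("Both live and dead lists are null");
        }
        for (String node : all) {
            if (deadSet.contains(node)) {
                if (dead != null) dead.add(node);
            } else {
                if (live != null) live.add(node);
            }
        }
    }

    public static void main(String[] args) {
        List<String> all = Arrays.asList("dn1", "dn2", "dn3");
        Set<String> deadSet = new HashSet<String>(Arrays.asList("dn2"));
        List<String> live = new ArrayList<String>();
        fetch(live, null, all, deadSet);   // only the live list is requested
        System.out.println(live);          // [dn1, dn3]
    }
}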


@ -0,0 +1,59 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
/** Datanode statistics */
public interface DatanodeStatistics {
/** @return the total capacity */
public long getCapacityTotal();
/** @return the used capacity */
public long getCapacityUsed();
/** @return the percentage of the used capacity over the total capacity. */
public float getCapacityUsedPercent();
/** @return the remaining capacity */
public long getCapacityRemaining();
/** @return the percentage of the remaining capacity over the total capacity. */
public float getCapacityRemainingPercent();
/** @return the block pool used. */
public long getBlockPoolUsed();
/** @return the percentage of the block pool used space over the total capacity. */
public float getPercentBlockPoolUsed();
/** @return the xceiver count */
public int getXceiverCount();
/**
* @return the total used space by data nodes for non-DFS purposes
* such as storing temporary files on the local file system
*/
public long getCapacityUsedNonDFS();
/** The same as {@link ClientProtocol#getStats()}.
* The block related entries are set to -1.
*/
public long[] getStats();
}
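The interface above is implemented by the heartbeat manager later in this patch; getCapacityUsedNonDFS(), for example, is derived from the other capacity figures and clamped at zero. A standalone sketch of that derivation with plain numbers rather than the Hadoop types:

public class CapacityMath {
    // non-DFS used = total - remaining - dfsUsed, never negative
    static long nonDfsUsed(long total, long remaining, long dfsUsed) {
        long nonDfs = total - remaining - dfsUsed;
        return nonDfs < 0L ? 0L : nonDfs;
    }

    // Analogous to the percent-used figure; not the exact DFSUtil implementation.
    static float usedPercent(long used, long total) {
        return total <= 0 ? 0.0f : used * 100.0f / total;
    }

    public static void main(String[] args) {
        long total = 1000L, remaining = 300L, dfsUsed = 500L;
        System.out.println(nonDfsUsed(total, remaining, dfsUsed));  // 200
        System.out.println(usedPercent(dfsUsed, total));            // 50.0
    }
}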


@ -24,7 +24,6 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.util.CyclicIteration;
/**
* Manage node decommissioning.
@ -35,11 +34,9 @@ class DecommissionManager {
static final Log LOG = LogFactory.getLog(DecommissionManager.class);
private final FSNamesystem fsnamesystem;
private final BlockManager blockManager;
DecommissionManager(FSNamesystem namesystem) {
DecommissionManager(final FSNamesystem namesystem) {
this.fsnamesystem = namesystem;
this.blockManager = fsnamesystem.getBlockManager();
}
/** Periodically check decommission status. */
@ -81,16 +78,16 @@ class DecommissionManager {
}
private void check() {
final DatanodeManager dm = fsnamesystem.getBlockManager().getDatanodeManager();
int count = 0;
for(Map.Entry<String, DatanodeDescriptor> entry
: blockManager.getDatanodeManager().getDatanodeCyclicIteration(
firstkey)) {
: dm.getDatanodeCyclicIteration(firstkey)) {
final DatanodeDescriptor d = entry.getValue();
firstkey = entry.getKey();
if (d.isDecommissionInProgress()) {
try {
blockManager.checkDecommissionStateInternal(d);
dm.checkDecommissionState(d);
} catch(Exception e) {
LOG.warn("entry=" + entry, e);
}


@ -0,0 +1,301 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.ArrayList;
import java.util.List;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.common.Util;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.util.Daemon;
/**
* Manage the heartbeats received from datanodes.
* The datanode list and statistics are synchronized
* by the heartbeat manager lock.
*/
class HeartbeatManager implements DatanodeStatistics {
static final Log LOG = LogFactory.getLog(HeartbeatManager.class);
/**
* Stores a subset of the datanodeMap in DatanodeManager,
* containing nodes that are considered alive.
* The HeartbeatMonitor periodically checks for out-dated entries,
* and removes them from the list.
* It is synchronized by the heartbeat manager lock.
*/
private final List<DatanodeDescriptor> datanodes = new ArrayList<DatanodeDescriptor>();
/** Statistics, which are synchronized by the heartbeat manager lock. */
private final Stats stats = new Stats();
/** The time period to check for expired datanodes */
private final long heartbeatRecheckInterval;
/** Heartbeat monitor thread */
private final Daemon heartbeatThread = new Daemon(new Monitor());
final FSNamesystem namesystem;
HeartbeatManager(final FSNamesystem namesystem, final Configuration conf) {
this.heartbeatRecheckInterval = conf.getInt(
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY,
DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_DEFAULT); // 5 minutes
this.namesystem = namesystem;
}
void activate(Configuration conf) {
heartbeatThread.start();
}
void close() {
heartbeatThread.interrupt();
}
synchronized int getLiveDatanodeCount() {
return datanodes.size();
}
@Override
public synchronized long getCapacityTotal() {
return stats.capacityTotal;
}
@Override
public synchronized long getCapacityUsed() {
return stats.capacityUsed;
}
@Override
public synchronized float getCapacityUsedPercent() {
return DFSUtil.getPercentUsed(stats.capacityUsed, stats.capacityTotal);
}
@Override
public synchronized long getCapacityRemaining() {
return stats.capacityRemaining;
}
@Override
public synchronized float getCapacityRemainingPercent() {
return DFSUtil.getPercentRemaining(
stats.capacityRemaining, stats.capacityTotal);
}
@Override
public synchronized long getBlockPoolUsed() {
return stats.blockPoolUsed;
}
@Override
public synchronized float getPercentBlockPoolUsed() {
return DFSUtil.getPercentUsed(stats.blockPoolUsed, stats.capacityTotal);
}
@Override
public synchronized long getCapacityUsedNonDFS() {
final long nonDFSUsed = stats.capacityTotal
- stats.capacityRemaining - stats.capacityUsed;
return nonDFSUsed < 0L? 0L : nonDFSUsed;
}
@Override
public synchronized int getXceiverCount() {
return stats.xceiverCount;
}
@Override
public synchronized long[] getStats() {
return new long[] {getCapacityTotal(),
getCapacityUsed(),
getCapacityRemaining(),
-1L,
-1L,
-1L,
getBlockPoolUsed()};
}
synchronized void register(final DatanodeDescriptor d) {
if (!datanodes.contains(d)) {
addDatanode(d);
//update its timestamp
d.updateHeartbeat(0L, 0L, 0L, 0L, 0, 0);
}
}
synchronized DatanodeDescriptor[] getDatanodes() {
return datanodes.toArray(new DatanodeDescriptor[datanodes.size()]);
}
synchronized void addDatanode(final DatanodeDescriptor d) {
datanodes.add(d);
d.isAlive = true;
}
synchronized void removeDatanode(DatanodeDescriptor node) {
if (node.isAlive) {
stats.subtract(node);
datanodes.remove(node);
node.isAlive = false;
}
}
synchronized void updateHeartbeat(final DatanodeDescriptor node,
long capacity, long dfsUsed, long remaining, long blockPoolUsed,
int xceiverCount, int failedVolumes) {
stats.subtract(node);
node.updateHeartbeat(capacity, dfsUsed, remaining, blockPoolUsed,
xceiverCount, failedVolumes);
stats.add(node);
}
synchronized void startDecommission(final DatanodeDescriptor node) {
stats.subtract(node);
node.startDecommission();
stats.add(node);
}
synchronized void stopDecommission(final DatanodeDescriptor node) {
stats.subtract(node);
node.stopDecommission();
stats.add(node);
}
/**
* Check if there are any expired heartbeats, and if so,
* whether any blocks have to be re-replicated.
* While removing dead datanodes, make sure that only one datanode is marked
* dead at a time within the synchronized section. Otherwise, a cascading
* effect causes more datanodes to be declared dead.
*/
void heartbeatCheck() {
final DatanodeManager dm = namesystem.getBlockManager().getDatanodeManager();
// It's OK to check safe mode w/o taking the lock here, we re-check
// for safe mode after taking the lock before removing a datanode.
if (namesystem.isInSafeMode()) {
return;
}
boolean allAlive = false;
while (!allAlive) {
// locate the first dead node.
DatanodeID dead = null;
synchronized(this) {
for (DatanodeDescriptor d : datanodes) {
if (dm.isDatanodeDead(d)) {
namesystem.incrExpiredHeartbeats();
dead = d;
break;
}
}
}
allAlive = dead == null;
if (!allAlive) {
// acquire the fsnamesystem lock, and then remove the dead node.
namesystem.writeLock();
if (namesystem.isInSafeMode()) {
return;
}
try {
synchronized(this) {
dm.removeDeadDatanode(dead);
}
} finally {
namesystem.writeUnlock();
}
}
}
}
/** Periodically check heartbeat and update block key */
private class Monitor implements Runnable {
private long lastHeartbeatCheck;
private long lastBlockKeyUpdate;
@Override
public void run() {
while(namesystem.isRunning()) {
try {
final long now = Util.now();
if (lastHeartbeatCheck + heartbeatRecheckInterval < now) {
heartbeatCheck();
lastHeartbeatCheck = now;
}
if (namesystem.getBlockManager().shouldUpdateBlockKey(
now - lastBlockKeyUpdate)) {
synchronized(HeartbeatManager.this) {
for(DatanodeDescriptor d : datanodes) {
d.needKeyUpdate = true;
}
}
lastBlockKeyUpdate = now;
}
} catch (Exception e) {
LOG.error("Exception while checking heartbeat", e);
}
try {
Thread.sleep(5000); // 5 seconds
} catch (InterruptedException ie) {
}
}
}
}
/** Datanode statistics.
* For decommissioning/decommissioned nodes, only used capacity is counted.
*/
private static class Stats {
private long capacityTotal = 0L;
private long capacityUsed = 0L;
private long capacityRemaining = 0L;
private long blockPoolUsed = 0L;
private int xceiverCount = 0;
private void add(final DatanodeDescriptor node) {
capacityUsed += node.getDfsUsed();
blockPoolUsed += node.getBlockPoolUsed();
xceiverCount += node.getXceiverCount();
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
capacityTotal += node.getCapacity();
capacityRemaining += node.getRemaining();
} else {
capacityTotal += node.getDfsUsed();
}
}
private void subtract(final DatanodeDescriptor node) {
capacityUsed -= node.getDfsUsed();
blockPoolUsed -= node.getBlockPoolUsed();
xceiverCount -= node.getXceiverCount();
if (!(node.isDecommissionInProgress() || node.isDecommissioned())) {
capacityTotal -= node.getCapacity();
capacityRemaining -= node.getRemaining();
} else {
capacityTotal -= node.getDfsUsed();
}
}
}
}
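Illustrative sketch (hypothetical, not part of this patch): the heartbeatCheck() javadoc above describes removing at most one dead datanode per pass, re-checking safe mode and taking the namesystem write lock before each removal, so that a slow scan cannot cascade into mass expirations. The sketch below reduces that locking pattern to plain Java; the DeadNodeSweeper and Node names are invented for illustration.
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.locks.ReentrantReadWriteLock;
class DeadNodeSweeper {
  static class Node {
    volatile boolean dead;
  }
  private final List<Node> nodes = new ArrayList<Node>();
  private final ReentrantReadWriteLock nsLock = new ReentrantReadWriteLock();
  void sweep() {
    boolean allAlive = false;
    while (!allAlive) {
      Node dead = null;
      synchronized (this) {              // short monitor section: scan only
        for (Node n : nodes) {
          if (n.dead) {
            dead = n;                    // pick at most one dead node per pass
            break;
          }
        }
      }
      allAlive = (dead == null);
      if (!allAlive) {
        nsLock.writeLock().lock();       // heavier lock taken outside the scan
        try {
          synchronized (this) {
            nodes.remove(dead);
          }
        } finally {
          nsLock.writeLock().unlock();
        }
      }
    }
  }
}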

View File

@ -17,21 +17,26 @@
*/
package org.apache.hadoop.hdfs.server.blockmanagement;
import java.util.*;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
/* Class for keeping track of under replication blocks
/** Keep track of under replication blocks.
* Blocks have replication priority, with priority 0 indicating the highest.
* Blocks that have only one replica have the highest priority.
*/
public class UnderReplicatedBlocks implements Iterable<Block> {
class UnderReplicatedBlocks implements Iterable<Block> {
static final int LEVEL = 5;
static public final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
private List<TreeSet<Block>> priorityQueues = new ArrayList<TreeSet<Block>>();
static final int QUEUE_WITH_CORRUPT_BLOCKS = 4;
private final List<NavigableSet<Block>> priorityQueues
= new ArrayList<NavigableSet<Block>>();
/* constructor */
/** Create an object. */
UnderReplicatedBlocks() {
for(int i=0; i<LEVEL; i++) {
priorityQueues.add(new TreeSet<Block>());
@ -47,8 +52,8 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
}
}
/* Return the total number of under replication blocks */
public synchronized int size() {
/** Return the total number of under replication blocks */
synchronized int size() {
int size = 0;
for (int i=0; i<LEVEL; i++) {
size += priorityQueues.get(i).size();
@ -56,7 +61,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
return size;
}
/* Return the number of under replication blocks excluding corrupt blocks */
/** Return the number of under replication blocks excluding corrupt blocks */
synchronized int getUnderReplicatedBlockCount() {
int size = 0;
for (int i=0; i<QUEUE_WITH_CORRUPT_BLOCKS; i++) {
@ -70,15 +75,15 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
return priorityQueues.get(QUEUE_WITH_CORRUPT_BLOCKS).size();
}
/* Check if a block is in the neededReplication queue */
public synchronized boolean contains(Block block) {
for(TreeSet<Block> set:priorityQueues) {
/** Check if a block is in the neededReplication queue */
synchronized boolean contains(Block block) {
for(NavigableSet<Block> set : priorityQueues) {
if(set.contains(block)) { return true; }
}
return false;
}
/* Return the priority of a block
/** Return the priority of a block
* @param block an under-replicated block
* @param curReplicas current number of replicas of the block
* @param expectedReplicas expected number of replicas of the block
@ -106,7 +111,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
}
}
/* add a block to an under-replication queue according to its priority
/** add a block to an under-replication queue according to its priority
* @param block an under-replicated block
* @param curReplicas current number of replicas of the block
* @param expectedReplicas expected number of replicas of the block
@ -134,7 +139,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
return false;
}
/* remove a block from an under-replication queue */
/** remove a block from an under-replication queue */
synchronized boolean remove(Block block,
int oldReplicas,
int decommissionedReplicas,
@ -145,7 +150,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
return remove(block, priLevel);
}
/* remove a block from an under-replication queue given a priority */
/** remove a block from an under-replication queue given a priority */
boolean remove(Block block, int priLevel) {
if(priLevel >= 0 && priLevel < LEVEL
&& priorityQueues.get(priLevel).remove(block)) {
@ -174,7 +179,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
return false;
}
/* update the priority level of a block */
/** update the priority level of a block */
synchronized void update(Block block, int curReplicas,
int decommissionedReplicas,
int curExpectedReplicas,
@ -209,30 +214,29 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
}
}
/* returns an iterator of all blocks in a given priority queue */
/** returns an iterator of all blocks in a given priority queue */
synchronized BlockIterator iterator(int level) {
return new BlockIterator(level);
}
/* return an iterator of all the under replication blocks */
/** return an iterator of all the under replication blocks */
public synchronized BlockIterator iterator() {
return new BlockIterator();
}
public class BlockIterator implements Iterator<Block> {
class BlockIterator implements Iterator<Block> {
private int level;
private boolean isIteratorForLevel = false;
private List<Iterator<Block>> iterators = new ArrayList<Iterator<Block>>();
BlockIterator()
{
private BlockIterator() {
level=0;
for(int i=0; i<LEVEL; i++) {
iterators.add(priorityQueues.get(i).iterator());
}
}
BlockIterator(int l) {
private BlockIterator(int l) {
level = l;
isIteratorForLevel = true;
iterators.add(priorityQueues.get(level).iterator());
@ -246,6 +250,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
}
}
@Override
public Block next() {
if (isIteratorForLevel)
return iterators.get(0).next();
@ -253,6 +258,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
return iterators.get(level).next();
}
@Override
public boolean hasNext() {
if (isIteratorForLevel)
return iterators.get(0).hasNext();
@ -260,6 +266,7 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
return iterators.get(level).hasNext();
}
@Override
public void remove() {
if (isIteratorForLevel)
iterators.get(0).remove();
@ -267,8 +274,8 @@ public class UnderReplicatedBlocks implements Iterable<Block> {
iterators.get(level).remove();
}
public int getPriority() {
int getPriority() {
return level;
};
}
}
}
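Illustrative sketch (hypothetical, not part of this patch): UnderReplicatedBlocks above keeps LEVEL ordered sets, one per priority, with the last level reserved for corrupt blocks, and size() sums across all levels. A generic reduction of that layout:
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableSet;
import java.util.TreeSet;
class PriorityBuckets<T extends Comparable<T>> {
  static final int LEVELS = 5;                     // last level reserved for "corrupt" items
  private final List<NavigableSet<T>> queues = new ArrayList<NavigableSet<T>>();
  PriorityBuckets() {
    for (int i = 0; i < LEVELS; i++) {
      queues.add(new TreeSet<T>());
    }
  }
  boolean add(T item, int priority) {
    return queues.get(priority).add(item);
  }
  boolean remove(T item, int priority) {
    return queues.get(priority).remove(item);
  }
  int size() {                                     // total across all priority levels
    int n = 0;
    for (NavigableSet<T> q : queues) {
      n += q.size();
    }
    return n;
  }
}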

View File

@ -29,7 +29,10 @@ import org.apache.hadoop.classification.InterfaceAudience;
************************************/
@InterfaceAudience.Private
public interface HdfsConstants {
public final class HdfsConstants {
/* Hidden constructor */
private HdfsConstants() { }
/**
* Type of the node
*/

View File

@ -26,11 +26,11 @@ import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.URL;
import java.net.URLEncoder;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.TreeSet;
import javax.servlet.ServletContext;
@ -190,13 +190,15 @@ public class JspHelper {
s.connect(addr, HdfsConstants.READ_TIMEOUT);
s.setSoTimeout(HdfsConstants.READ_TIMEOUT);
long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);
long amtToRead = Math.min(chunkSizeToView, blockSize - offsetIntoBlock);
// Use the block name for file name.
String file = BlockReader.getFileName(addr, poolId, blockId);
BlockReader blockReader = BlockReader.newBlockReader(s, file,
int bufferSize = conf.getInt(DFSConfigKeys.IO_FILE_BUFFER_SIZE_KEY,
DFSConfigKeys.IO_FILE_BUFFER_SIZE_DEFAULT);
String file = BlockReader.getFileName(addr, poolId, blockId);
BlockReader blockReader = BlockReader.newBlockReader(s, file,
new ExtendedBlock(poolId, blockId, 0, genStamp), blockToken,
offsetIntoBlock, amtToRead, conf.getInt("io.file.buffer.size", 4096));
offsetIntoBlock, amtToRead, bufferSize);
byte[] buf = new byte[(int)amtToRead];
int readOffset = 0;
@ -249,7 +251,7 @@ public class JspHelper {
out.print("</tbody></table>");
}
public static void sortNodeList(ArrayList<DatanodeDescriptor> nodes,
public static void sortNodeList(final List<DatanodeDescriptor> nodes,
String field, String order) {
class NodeComapare implements Comparator<DatanodeDescriptor> {

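Illustrative sketch (hypothetical, not part of this patch): the JspHelper hunk above replaces the inline "io.file.buffer.size"/4096 literal with a named key and shared default. The pattern, with stand-in constants where the patch uses DFSConfigKeys:
import org.apache.hadoop.conf.Configuration;
class BufferSizeExample {
  // Stand-in constants; the patch reads these from DFSConfigKeys instead.
  static final String IO_FILE_BUFFER_SIZE_KEY = "io.file.buffer.size";
  static final int IO_FILE_BUFFER_SIZE_DEFAULT = 4096;
  static int bufferSize(Configuration conf) {
    // A named key plus a shared default keeps every caller consistent.
    return conf.getInt(IO_FILE_BUFFER_SIZE_KEY, IO_FILE_BUFFER_SIZE_DEFAULT);
  }
}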
View File

@ -54,7 +54,7 @@ import org.apache.hadoop.util.PureJavaCrc32;
* may copy it to another site. If a throttler is provided,
* streaming throttling is also supported.
**/
class BlockReceiver implements Closeable, FSConstants {
class BlockReceiver implements Closeable {
public static final Log LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
@ -179,8 +179,7 @@ class BlockReceiver implements Closeable, FSConstants {
this.out = streams.dataOut;
this.cout = streams.checksumOut;
this.checksumOut = new DataOutputStream(new BufferedOutputStream(
streams.checksumOut,
SMALL_BUFFER_SIZE));
streams.checksumOut, FSConstants.SMALL_BUFFER_SIZE));
// write data chunk header if creating a new replica
if (isCreate) {
BlockMetadataHeader.writeHeader(checksumOut, checksum);
@ -399,7 +398,7 @@ class BlockReceiver implements Closeable, FSConstants {
buf.limit(bufRead);
}
while (buf.remaining() < SIZE_OF_INTEGER) {
while (buf.remaining() < FSConstants.BYTES_IN_INTEGER) {
if (buf.position() > 0) {
shiftBufData();
}
@ -418,9 +417,10 @@ class BlockReceiver implements Closeable, FSConstants {
payloadLen);
}
// Subtract SIZE_OF_INTEGER since that accounts for the payloadLen that
// Subtract BYTES_IN_INTEGER since that accounts for the payloadLen that
// we read above.
int pktSize = payloadLen + PacketHeader.PKT_HEADER_LEN - SIZE_OF_INTEGER;
int pktSize = payloadLen + PacketHeader.PKT_HEADER_LEN
- FSConstants.BYTES_IN_INTEGER;
if (buf.remaining() < pktSize) {
//we need to read more data
@ -817,7 +817,7 @@ class BlockReceiver implements Closeable, FSConstants {
* Processes responses from downstream datanodes in the pipeline
* and sends back replies to the originator.
*/
class PacketResponder implements Runnable, Closeable, FSConstants {
class PacketResponder implements Runnable, Closeable {
/** queue for packets waiting for ack */
private final LinkedList<Packet> ackQueue = new LinkedList<Packet>();

View File

@ -42,7 +42,7 @@ import org.apache.hadoop.util.DataChecksum;
/**
* Reads a block from the disk and sends it to a recipient.
*/
class BlockSender implements java.io.Closeable, FSConstants {
class BlockSender implements java.io.Closeable {
public static final Log LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
@ -155,7 +155,7 @@ class BlockSender implements java.io.Closeable, FSConstants {
if ( !corruptChecksumOk || datanode.data.metaFileExists(block) ) {
checksumIn = new DataInputStream(new BufferedInputStream(datanode.data
.getMetaDataInputStream(block), BUFFER_SIZE));
.getMetaDataInputStream(block), FSConstants.IO_FILE_BUFFER_SIZE));
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
@ -472,15 +472,15 @@ class BlockSender implements java.io.Closeable, FSConstants {
streamForSendChunks = baseStream;
// ensure a minimum buffer size.
maxChunksPerPacket = (Math.max(BUFFER_SIZE,
maxChunksPerPacket = (Math.max(FSConstants.IO_FILE_BUFFER_SIZE,
MIN_BUFFER_WITH_TRANSFERTO)
+ bytesPerChecksum - 1)/bytesPerChecksum;
// allocate smaller buffer while using transferTo().
pktSize += checksumSize * maxChunksPerPacket;
} else {
maxChunksPerPacket = Math.max(1,
(BUFFER_SIZE + bytesPerChecksum - 1)/bytesPerChecksum);
maxChunksPerPacket = Math.max(1, (FSConstants.IO_FILE_BUFFER_SIZE
+ bytesPerChecksum - 1) / bytesPerChecksum);
pktSize += (bytesPerChecksum + checksumSize) * maxChunksPerPacket;
}
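Illustrative sketch (hypothetical, not part of this patch): maxChunksPerPacket above uses the usual round-up-division idiom, (buffer + chunk - 1) / chunk, clamped to at least one chunk. Reduced to a standalone helper:
class ChunkMathSketch {
  /** Ceiling division: how many whole checksum chunks cover the buffer. */
  static int chunksPerPacket(int bufferBytes, int bytesPerChecksum) {
    return Math.max(1, (bufferBytes + bytesPerChecksum - 1) / bytesPerChecksum);
  }
  public static void main(String[] args) {
    // e.g. a 64 KB buffer with 512-byte checksum chunks -> 128 chunks per packet
    System.out.println(chunksPerPacket(64 * 1024, 512));
  }
}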

View File

@ -169,7 +169,7 @@ import org.mortbay.util.ajax.JSON;
**********************************************************/
@InterfaceAudience.Private
public class DataNode extends Configured
implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants,
implements InterDatanodeProtocol, ClientDatanodeProtocol,
DataNodeMXBean {
public static final Log LOG = LogFactory.getLog(DataNode.class);
@ -348,7 +348,7 @@ public class DataNode extends Configured
ThreadGroup threadGroup = null;
long blockReportInterval;
boolean resetBlockReportTime = true;
long initialBlockReportDelay = BLOCKREPORT_INITIAL_DELAY * 1000L;
long initialBlockReportDelay = DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT * 1000L;
long heartBeatInterval;
private boolean heartbeatsDisabledForTests = false;
private DataStorage storage = null;
@ -440,21 +440,23 @@ public class DataNode extends Configured
HdfsConstants.WRITE_TIMEOUT);
/* Based on results on different platforms, we might need to set the default
* to false on some of them. */
this.transferToAllowed = conf.getBoolean("dfs.datanode.transferTo.allowed",
true);
this.transferToAllowed = conf.getBoolean(
DFS_DATANODE_TRANSFERTO_ALLOWED_KEY,
DFS_DATANODE_TRANSFERTO_ALLOWED_DEFAULT);
this.writePacketSize = conf.getInt(DFS_CLIENT_WRITE_PACKET_SIZE_KEY,
DFS_CLIENT_WRITE_PACKET_SIZE_DEFAULT);
this.blockReportInterval =
conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY, BLOCKREPORT_INTERVAL);
this.initialBlockReportDelay = conf.getLong(DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
BLOCKREPORT_INITIAL_DELAY)* 1000L;
this.blockReportInterval = conf.getLong(DFS_BLOCKREPORT_INTERVAL_MSEC_KEY,
DFS_BLOCKREPORT_INTERVAL_MSEC_DEFAULT);
this.initialBlockReportDelay = conf.getLong(
DFS_BLOCKREPORT_INITIAL_DELAY_KEY,
DFS_BLOCKREPORT_INITIAL_DELAY_DEFAULT) * 1000L;
if (this.initialBlockReportDelay >= blockReportInterval) {
this.initialBlockReportDelay = 0;
LOG.info("dfs.blockreport.initialDelay is greater than " +
"dfs.blockreport.intervalMsec." + " Setting initial delay to 0 msec:");
}
this.heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY, HEARTBEAT_INTERVAL) * 1000L;
this.heartBeatInterval = conf.getLong(DFS_HEARTBEAT_INTERVAL_KEY,
DFS_HEARTBEAT_INTERVAL_DEFAULT) * 1000L;
// do we need to sync block file contents to disk when blockfile is closed?
this.syncOnClose = conf.getBoolean(DFS_DATANODE_SYNCONCLOSE_KEY,
@ -617,7 +619,7 @@ public class DataNode extends Configured
} else {
ss = secureResources.getStreamingSocket();
}
ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
ss.setReceiveBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
// adjust machine name with the actual port
int tmpPort = ss.getLocalPort();
selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
@ -1967,8 +1969,8 @@ public class DataNode extends Configured
long writeTimeout = socketWriteTimeout +
HdfsConstants.WRITE_TIMEOUT_EXTENSION * (targets.length-1);
OutputStream baseStream = NetUtils.getOutputStream(sock, writeTimeout);
out = new DataOutputStream(new BufferedOutputStream(baseStream,
SMALL_BUFFER_SIZE));
out = new DataOutputStream(new BufferedOutputStream(baseStream,
FSConstants.SMALL_BUFFER_SIZE));
blockSender = new BlockSender(b, 0, b.getNumBytes(),
false, false, false, DataNode.this);
DatanodeInfo srcNode = new DatanodeInfo(bpReg);

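Illustrative sketch (hypothetical, not part of this patch): the DataNode hunk above resolves the block report interval and initial delay from configuration and clamps the delay to zero when it is not strictly smaller than the interval. The same logic with stand-in keys and assumed defaults:
import org.apache.hadoop.conf.Configuration;
class ReportTimingSketch {
  // Stand-in keys/defaults for the DFS_BLOCKREPORT_* constants in DFSConfigKeys.
  static final String INTERVAL_KEY = "dfs.blockreport.intervalMsec";
  static final long INTERVAL_DEFAULT_MSEC = 60L * 60 * 1000;     // assumed default
  static final String INITIAL_DELAY_KEY = "dfs.blockreport.initialDelay";
  static final long INITIAL_DELAY_DEFAULT_SEC = 0;
  static long[] resolve(Configuration conf) {
    long intervalMsec = conf.getLong(INTERVAL_KEY, INTERVAL_DEFAULT_MSEC);
    long delayMsec = conf.getLong(INITIAL_DELAY_KEY, INITIAL_DELAY_DEFAULT_SEC) * 1000L;
    if (delayMsec >= intervalMsec) {
      delayMsec = 0;   // same clamp as the DataNode: the delay must be < the interval
    }
    return new long[] { intervalMsec, delayMsec };
  }
}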
View File

@ -69,7 +69,7 @@ import com.google.protobuf.ByteString;
/**
* Thread for processing incoming/outgoing data stream.
*/
class DataXceiver extends Receiver implements Runnable, FSConstants {
class DataXceiver extends Receiver implements Runnable {
public static final Log LOG = DataNode.LOG;
static final Log ClientTraceLog = DataNode.ClientTraceLog;
@ -202,8 +202,8 @@ class DataXceiver extends Receiver implements Runnable, FSConstants {
final long length) throws IOException {
OutputStream baseStream = NetUtils.getOutputStream(s,
datanode.socketWriteTimeout);
DataOutputStream out = new DataOutputStream(
new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));
DataOutputStream out = new DataOutputStream(new BufferedOutputStream(
baseStream, FSConstants.SMALL_BUFFER_SIZE));
checkAccess(out, true, block, blockToken,
Op.READ_BLOCK, BlockTokenSecretManager.AccessMode.READ);
@ -329,7 +329,7 @@ class DataXceiver extends Receiver implements Runnable, FSConstants {
final DataOutputStream replyOut = new DataOutputStream(
new BufferedOutputStream(
NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
SMALL_BUFFER_SIZE));
FSConstants.SMALL_BUFFER_SIZE));
checkAccess(replyOut, isClient, block, blockToken,
Op.WRITE_BLOCK, BlockTokenSecretManager.AccessMode.WRITE);
@ -369,11 +369,11 @@ class DataXceiver extends Receiver implements Runnable, FSConstants {
(HdfsConstants.WRITE_TIMEOUT_EXTENSION * targets.length);
NetUtils.connect(mirrorSock, mirrorTarget, timeoutValue);
mirrorSock.setSoTimeout(timeoutValue);
mirrorSock.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
mirrorSock.setSendBufferSize(FSConstants.DEFAULT_DATA_SOCKET_SIZE);
mirrorOut = new DataOutputStream(
new BufferedOutputStream(
NetUtils.getOutputStream(mirrorSock, writeTimeout),
SMALL_BUFFER_SIZE));
FSConstants.SMALL_BUFFER_SIZE));
mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
new Sender(mirrorOut).writeBlock(originalBlock, blockToken,
@ -524,7 +524,7 @@ class DataXceiver extends Receiver implements Runnable, FSConstants {
final MetaDataInputStream metadataIn =
datanode.data.getMetaDataInputStream(block);
final DataInputStream checksumIn = new DataInputStream(new BufferedInputStream(
metadataIn, BUFFER_SIZE));
metadataIn, FSConstants.IO_FILE_BUFFER_SIZE));
updateCurrentThreadName("Getting checksum for block " + block);
try {
@ -603,7 +603,7 @@ class DataXceiver extends Receiver implements Runnable, FSConstants {
OutputStream baseStream = NetUtils.getOutputStream(
s, datanode.socketWriteTimeout);
reply = new DataOutputStream(new BufferedOutputStream(
baseStream, SMALL_BUFFER_SIZE));
baseStream, FSConstants.SMALL_BUFFER_SIZE));
// send status first
writeResponse(SUCCESS, reply);
@ -681,15 +681,15 @@ class DataXceiver extends Receiver implements Runnable, FSConstants {
OutputStream baseStream = NetUtils.getOutputStream(proxySock,
datanode.socketWriteTimeout);
proxyOut = new DataOutputStream(
new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));
proxyOut = new DataOutputStream(new BufferedOutputStream(baseStream,
FSConstants.SMALL_BUFFER_SIZE));
/* send request to the proxy */
new Sender(proxyOut).copyBlock(block, blockToken);
// receive the response from the proxy
proxyReply = new DataInputStream(new BufferedInputStream(
NetUtils.getInputStream(proxySock), BUFFER_SIZE));
NetUtils.getInputStream(proxySock), FSConstants.IO_FILE_BUFFER_SIZE));
BlockOpResponseProto copyResponse = BlockOpResponseProto.parseFrom(
HdfsProtoUtil.vintPrefixed(proxyReply));

View File

@ -42,7 +42,7 @@ import org.apache.hadoop.util.Daemon;
* other DataNodes. This small server does not use the
* Hadoop IPC mechanism.
*/
class DataXceiverServer implements Runnable, FSConstants {
class DataXceiverServer implements Runnable {
public static final Log LOG = DataNode.LOG;
ServerSocket ss;
@ -119,8 +119,8 @@ class DataXceiverServer implements Runnable, FSConstants {
conf.getInt(DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_KEY,
DFSConfigKeys.DFS_DATANODE_MAX_RECEIVER_THREADS_DEFAULT);
this.estimateBlockSize =
conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
this.estimateBlockSize = conf.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
//set up parameter for cluster balancing
this.balanceThrottler = new BlockBalanceThrottler(

View File

@ -47,8 +47,8 @@ import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.StringUtils;
import org.mortbay.util.URIUtil;
@InterfaceAudience.Private
public class DatanodeJspHelper {
@ -289,7 +289,7 @@ public class DatanodeJspHelper {
// Add the various links for looking at the file contents
// URL for downloading the full file
String downloadUrl = "http://" + req.getServerName() + ":"
+ req.getServerPort() + "/streamFile" + URIUtil.encodePath(filename)
+ req.getServerPort() + "/streamFile" + ServletUtil.encodePath(filename)
+ JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr, true)
+ JspHelper.getDelegationTokenUrlParam(tokenString);
out.print("<a name=\"viewOptions\"></a>");

View File

@ -75,7 +75,7 @@ import org.apache.hadoop.util.ReflectionUtils;
*
***************************************************/
@InterfaceAudience.Private
public class FSDataset implements FSConstants, FSDatasetInterface {
public class FSDataset implements FSDatasetInterface {
/**
* A node type that can be built into a tree reflecting the
@ -465,7 +465,7 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
}
checksumIn = new DataInputStream(
new BufferedInputStream(new FileInputStream(metaFile),
BUFFER_SIZE));
FSConstants.IO_FILE_BUFFER_SIZE));
// read and handle the common header here. For now just a version
BlockMetadataHeader header = BlockMetadataHeader.readHeader(checksumIn);
@ -775,12 +775,13 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
*/
private volatile List<FSVolume> volumes = null;
BlockVolumeChoosingPolicy blockChooser;
int numFailedVolumes = 0;
int numFailedVolumes;
FSVolumeSet(FSVolume[] volumes, BlockVolumeChoosingPolicy blockChooser) {
FSVolumeSet(FSVolume[] volumes, int failedVols, BlockVolumeChoosingPolicy blockChooser) {
List<FSVolume> list = Arrays.asList(volumes);
this.volumes = Collections.unmodifiableList(list);
this.blockChooser = blockChooser;
this.numFailedVolumes = failedVols;
}
private int numberOfVolumes() {
@ -1144,15 +1145,19 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
String[] dataDirs = conf.getTrimmedStrings(DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY);
int volsConfigured = (dataDirs == null) ? 0 : dataDirs.length;
int volsFailed = volsConfigured - storage.getNumStorageDirs();
this.validVolsRequired = volsConfigured - volFailuresTolerated;
if (validVolsRequired < 1
|| validVolsRequired > storage.getNumStorageDirs()) {
if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
throw new DiskErrorException("Invalid volume failure "
+ " config value: " + volFailuresTolerated);
}
if (volsFailed > volFailuresTolerated) {
throw new DiskErrorException("Too many failed volumes - "
+ "current valid volumes: " + storage.getNumStorageDirs()
+ ", volumes configured: " + volsConfigured
+ ", volume failures tolerated: " + volFailuresTolerated );
+ ", volumes failed: " + volsFailed
+ ", volume failures tolerated: " + volFailuresTolerated);
}
FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
@ -1170,7 +1175,7 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
RoundRobinVolumesPolicy.class,
BlockVolumeChoosingPolicy.class),
conf);
volumes = new FSVolumeSet(volArray, blockChooserImpl);
volumes = new FSVolumeSet(volArray, volsFailed, blockChooserImpl);
volumes.getVolumeMap(volumeMap);
File[] roots = new File[storage.getNumStorageDirs()];

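Illustrative sketch (hypothetical, not part of this patch): the FSDataset hunk above reworks the startup check so that the tolerated-failure count must be non-negative and smaller than the number of configured volumes, and the volumes that actually failed must not exceed it. The same validation in isolation:
import java.io.IOException;
class VolumeFailureCheckSketch {
  static void check(int volsConfigured, int volsInService, int volFailuresTolerated)
      throws IOException {
    int volsFailed = volsConfigured - volsInService;
    if (volFailuresTolerated < 0 || volFailuresTolerated >= volsConfigured) {
      throw new IOException("Invalid volume failure config value: "
          + volFailuresTolerated);
    }
    if (volsFailed > volFailuresTolerated) {
      throw new IOException("Too many failed volumes - volumes configured: "
          + volsConfigured + ", volumes failed: " + volsFailed
          + ", volume failures tolerated: " + volFailuresTolerated);
    }
  }
}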
View File

@ -91,7 +91,6 @@ public class BackupImage extends FSImage {
super(conf);
storage.setDisablePreUpgradableLayoutCheck(true);
bnState = BNState.DROP_UNTIL_NEXT_ROLL;
editLog.initJournals();
}
/**
@ -210,14 +209,13 @@ public class BackupImage extends FSImage {
if (LOG.isTraceEnabled()) {
LOG.debug("data:" + StringUtils.byteToHexString(data));
}
backupInputStream.setBytes(data);
FSEditLogLoader logLoader = new FSEditLogLoader(namesystem);
int logVersion = storage.getLayoutVersion();
BufferedInputStream bin = new BufferedInputStream(backupInputStream);
DataInputStream in = new DataInputStream(bin);
Checksum checksum = FSEditLog.getChecksum();
int numLoaded = logLoader.loadEditRecords(logVersion, in, checksum, true,
lastAppliedTxId + 1);
backupInputStream.setBytes(data, logVersion);
int numLoaded = logLoader.loadEditRecords(logVersion, backupInputStream,
true, lastAppliedTxId + 1);
if (numLoaded != numTxns) {
throw new IOException("Batch of txns starting at txnid " +
firstTxId + " was supposed to contain " + numTxns +

View File

@ -54,7 +54,7 @@ class BackupJournalManager implements JournalManager {
}
@Override
public void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {
}

View File

@ -207,7 +207,7 @@ class Checkpointer extends Daemon {
long lastApplied = bnImage.getLastAppliedTxId();
LOG.debug("Doing checkpoint. Last applied: " + lastApplied);
RemoteEditLogManifest manifest =
getNamenode().getEditLogManifest(bnImage.getLastAppliedTxId());
getNamenode().getEditLogManifest(bnImage.getLastAppliedTxId() + 1);
if (!manifest.getLogs().isEmpty()) {
RemoteEditLog firstRemoteLog = manifest.getLogs().get(0);

View File

@ -31,6 +31,7 @@ import org.apache.hadoop.fs.ContentSummary;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
import org.znerd.xmlenc.XMLOutputter;
/** Servlets for file checksum */
@ -49,8 +50,7 @@ public class ContentSummaryServlet extends DfsServlet {
ugi.doAs(new PrivilegedExceptionAction<Void>() {
@Override
public Void run() throws Exception {
final String path = request.getPathInfo();
final String path = ServletUtil.getDecodedPath(request, "/contentSummary");
final PrintWriter out = response.getWriter();
final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
xml.declaration();

View File

@ -19,8 +19,6 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.URI;
import java.net.URISyntaxException;
import javax.servlet.ServletContext;
import javax.servlet.http.HttpServlet;
@ -33,8 +31,6 @@ import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
@ -86,48 +82,6 @@ abstract class DfsServlet extends HttpServlet {
return DFSUtil.createNamenode(nnAddr, conf);
}
/** Create a URI for redirecting request to a datanode */
protected URI createRedirectUri(String servletpath,
UserGroupInformation ugi,
DatanodeID host,
HttpServletRequest request,
NameNode nn
) throws IOException, URISyntaxException {
final String hostname = host instanceof DatanodeInfo?
((DatanodeInfo)host).getHostName(): host.getHost();
final String scheme = request.getScheme();
final int port = "https".equals(scheme)?
(Integer)getServletContext().getAttribute("datanode.https.port")
: host.getInfoPort();
final String filename = request.getPathInfo();
StringBuilder params = new StringBuilder();
params.append("filename=");
params.append(filename);
if (UserGroupInformation.isSecurityEnabled()) {
String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
params.append(JspHelper.getDelegationTokenUrlParam(tokenString));
} else {
params.append("&ugi=");
params.append(ugi.getShortUserName());
}
// Add namenode address to the URL params
String nnAddr = NameNode.getHostPortString(nn.getNameNodeAddress());
params.append(JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, nnAddr));
return new URI(scheme, null, hostname, port, servletpath,
params.toString(), null);
}
/** Get filename from the request */
protected String getFilename(HttpServletRequest request,
HttpServletResponse response) throws IOException {
final String filename = request.getParameter("filename");
if (filename == null || filename.length() == 0) {
throw new IOException("Invalid filename");
}
return filename;
}
protected UserGroupInformation getUGI(HttpServletRequest request,
Configuration conf) throws IOException {
return JspHelper.getUGI(getServletContext(), request, conf);

View File

@ -21,6 +21,8 @@ import java.io.DataInputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import com.google.common.base.Preconditions;
/**
* An implementation of the abstract class {@link EditLogInputStream},
* which is used to update HDFS meta-data state on a backup node.
@ -33,6 +35,9 @@ class EditLogBackupInputStream extends EditLogInputStream {
String address; // sender address
private ByteBufferInputStream inner;
private DataInputStream in;
private FSEditLogOp.Reader reader = null;
private FSEditLogLoader.PositionTrackingInputStream tracker = null;
private int version = 0;
/**
* A ByteArrayInputStream, which lets modify the underlying byte array.
@ -60,7 +65,8 @@ class EditLogBackupInputStream extends EditLogInputStream {
EditLogBackupInputStream(String name) throws IOException {
address = name;
inner = new ByteBufferInputStream();
in = new DataInputStream(inner);
in = null;
reader = null;
}
@Override // JournalStream
@ -74,18 +80,20 @@ class EditLogBackupInputStream extends EditLogInputStream {
}
@Override
public int available() throws IOException {
return in.available();
public FSEditLogOp readOp() throws IOException {
Preconditions.checkState(reader != null,
"Must call setBytes() before readOp()");
return reader.readOp();
}
@Override
public int read() throws IOException {
return in.read();
public int getVersion() throws IOException {
return this.version;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
return in.read(b, off, len);
public long getPosition() {
return tracker.getPos();
}
@Override
@ -99,16 +107,19 @@ class EditLogBackupInputStream extends EditLogInputStream {
return inner.length();
}
DataInputStream getDataInputStream() {
return in;
}
void setBytes(byte[] newBytes) throws IOException {
void setBytes(byte[] newBytes, int version) throws IOException {
inner.setData(newBytes);
in.reset();
tracker = new FSEditLogLoader.PositionTrackingInputStream(inner);
in = new DataInputStream(tracker);
this.version = version;
reader = new FSEditLogOp.Reader(in, version);
}
void clear() throws IOException {
setBytes(null);
setBytes(null, 0);
reader = null;
this.version = 0;
}
}

View File

@ -21,18 +21,51 @@ package org.apache.hadoop.hdfs.server.namenode;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.BufferedInputStream;
import java.io.EOFException;
import java.io.DataInputStream;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.io.IOUtils;
import com.google.common.annotations.VisibleForTesting;
/**
* An implementation of the abstract class {@link EditLogInputStream}, which
* reads edits from a local file.
*/
class EditLogFileInputStream extends EditLogInputStream {
private File file;
private FileInputStream fStream;
EditLogFileInputStream(File name) throws IOException {
private final File file;
private final FileInputStream fStream;
private final int logVersion;
private final FSEditLogOp.Reader reader;
private final FSEditLogLoader.PositionTrackingInputStream tracker;
/**
* Open an EditLogInputStream for the given file.
* @param name filename to open
* @throws LogHeaderCorruptException if the header is either missing or
* appears to be corrupt/truncated
* @throws IOException if an actual IO error occurs while reading the
* header
*/
EditLogFileInputStream(File name)
throws LogHeaderCorruptException, IOException {
file = name;
fStream = new FileInputStream(name);
BufferedInputStream bin = new BufferedInputStream(fStream);
tracker = new FSEditLogLoader.PositionTrackingInputStream(bin);
DataInputStream in = new DataInputStream(tracker);
try {
logVersion = readLogVersion(in);
} catch (EOFException eofe) {
throw new LogHeaderCorruptException("No header found in log");
}
reader = new FSEditLogOp.Reader(in, logVersion);
}
@Override // JournalStream
@ -46,18 +79,18 @@ class EditLogFileInputStream extends EditLogInputStream {
}
@Override
public int available() throws IOException {
return fStream.available();
public FSEditLogOp readOp() throws IOException {
return reader.readOp();
}
@Override
public int read() throws IOException {
return fStream.read();
public int getVersion() throws IOException {
return logVersion;
}
@Override
public int read(byte[] b, int off, int len) throws IOException {
return fStream.read(b, off, len);
public long getPosition() {
return tracker.getPos();
}
@Override
@ -76,4 +109,62 @@ class EditLogFileInputStream extends EditLogInputStream {
return getName();
}
static FSEditLogLoader.EditLogValidation validateEditLog(File file) throws IOException {
EditLogFileInputStream in;
try {
in = new EditLogFileInputStream(file);
} catch (LogHeaderCorruptException corrupt) {
// If it's missing its header, this is equivalent to no transactions
FSImage.LOG.warn("Log at " + file + " has no valid header",
corrupt);
return new FSEditLogLoader.EditLogValidation(0, 0);
}
try {
return FSEditLogLoader.validateEditLog(in);
} finally {
IOUtils.closeStream(in);
}
}
/**
* Read the header of the fsedit log
* @param in the fsedit stream
* @return the edit log version number
* @throws IOException if an error occurs
*/
@VisibleForTesting
static int readLogVersion(DataInputStream in)
throws IOException, LogHeaderCorruptException {
int logVersion;
try {
logVersion = in.readInt();
} catch (EOFException eofe) {
throw new LogHeaderCorruptException(
"Reached EOF when reading log header");
}
if (logVersion < FSConstants.LAYOUT_VERSION) { // future version
throw new LogHeaderCorruptException(
"Unexpected version of the file system log file: "
+ logVersion + ". Current version = "
+ FSConstants.LAYOUT_VERSION + ".");
}
assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION :
"Unsupported version " + logVersion;
return logVersion;
}
/**
* Exception indicating that the header of an edits log file is
* corrupted. This can be because the header is not present,
* or because the header data is invalid (e.g. it claims to be
* from a newer version than the running NameNode supports)
*/
static class LogHeaderCorruptException extends IOException {
private static final long serialVersionUID = 1L;
private LogHeaderCorruptException(String msg) {
super(msg);
}
}
}
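Illustrative sketch (hypothetical, not part of this patch): readLogVersion() above rejects logs whose version is numerically smaller than the current layout version; HDFS layout versions are negative and decrease as the format evolves, so a smaller value means the file was written by a newer NameNode. A standalone reduction (the -38 constant is only a placeholder, not the real LAYOUT_VERSION):
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.IOException;
class LogVersionSketch {
  static final int CURRENT_LAYOUT_VERSION = -38;   // placeholder value
  static int readLogVersion(DataInputStream in) throws IOException {
    final int logVersion;
    try {
      logVersion = in.readInt();
    } catch (EOFException eofe) {
      throw new IOException("Reached EOF when reading log header");
    }
    if (logVersion < CURRENT_LAYOUT_VERSION) {
      // More negative than what we understand => written by a newer NameNode.
      throw new IOException("Unexpected edit log version: " + logVersion);
    }
    return logVersion;
  }
}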

View File

@ -17,10 +17,8 @@
*/
package org.apache.hadoop.hdfs.server.namenode;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.Closeable;
import java.io.IOException;
import java.io.InputStream;
/**
* A generic abstract class to support reading edits log data from
@ -29,29 +27,41 @@ import java.io.InputStream;
* It should stream bytes from the storage exactly as they were written
* into the #{@link EditLogOutputStream}.
*/
abstract class EditLogInputStream extends InputStream
implements JournalStream {
/** {@inheritDoc} */
public abstract int available() throws IOException;
/** {@inheritDoc} */
public abstract int read() throws IOException;
/** {@inheritDoc} */
public abstract int read(byte[] b, int off, int len) throws IOException;
/** {@inheritDoc} */
abstract class EditLogInputStream implements JournalStream, Closeable {
/**
* Close the stream.
* @throws IOException if an error occurred while closing
*/
public abstract void close() throws IOException;
/**
* Read an operation from the stream
* @return an operation from the stream or null if at end of stream
* @throws IOException if there is an error reading from the stream
*/
public abstract FSEditLogOp readOp() throws IOException;
/**
* Get the layout version of the data in the stream.
* @return the layout version of the ops in the stream.
* @throws IOException if there is an error reading the version
*/
public abstract int getVersion() throws IOException;
/**
* Get the "position" in the stream. This is useful for
* debugging and operational purposes.
*
* Different stream types can have a different meaning for
* what the position is. For file streams it means the byte offset
* from the start of the file.
*
* @return the position in the stream
*/
public abstract long getPosition();
/**
* Return the size of the current edits log.
*/
abstract long length() throws IOException;
/**
* Return DataInputStream based on this edit stream.
*/
DataInputStream getDataInputStream() {
return new DataInputStream(new BufferedInputStream(this));
}
}
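Illustrative sketch (hypothetical, not part of this patch): the reshaped EditLogInputStream above hands back parsed operations via readOp() instead of raw bytes, with getPosition() exposing a stream-specific offset for error reporting. A minimal consumer written against a stand-in interface:
import java.io.Closeable;
import java.io.IOException;
class OpStreamSketch {
  interface LogOp { }                       // stand-in for FSEditLogOp
  interface OpStream extends Closeable {
    LogOp readOp() throws IOException;      // null at end of stream
    int getVersion() throws IOException;    // layout version of the ops
    long getPosition();                     // e.g. byte offset for file streams
  }
  /** Count the ops in a stream, remembering the offset of the last good one. */
  static long[] drain(OpStream in) throws IOException {
    long count = 0;
    long lastGoodPos = 0;
    while (in.readOp() != null) {
      lastGoodPos = in.getPosition();
      count++;
    }
    return new long[] { count, lastGoodPos };
  }
}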

View File

@ -444,8 +444,6 @@ public class FSDirectory implements Closeable {
// modify file-> block and blocksMap
fileNode.removeLastBlock(block);
getBlockManager().removeBlockFromMap(block);
// If block is removed from blocksMap remove it from corruptReplicasMap
getBlockManager().removeFromCorruptReplicasMap(block);
// write modified block locations to log
fsImage.getEditLog().logOpenFile(path, fileNode);
@ -809,7 +807,7 @@ public class FSDirectory implements Closeable {
* @return array of file blocks
* @throws QuotaExceededException
*/
Block[] setReplication(String src, short replication, int[] oldReplication)
Block[] setReplication(String src, short replication, short[] oldReplication)
throws QuotaExceededException, UnresolvedLinkException {
waitForReady();
Block[] fileBlocks = null;
@ -826,14 +824,10 @@ public class FSDirectory implements Closeable {
Block[] unprotectedSetReplication(String src,
short replication,
int[] oldReplication
short[] oldReplication
) throws QuotaExceededException,
UnresolvedLinkException {
assert hasWriteLock();
if (oldReplication == null) {
oldReplication = new int[1];
}
oldReplication[0] = -1;
INode[] inodes = rootDir.getExistingPathINodes(src, true);
INode inode = inodes[inodes.length - 1];
@ -845,14 +839,17 @@ public class FSDirectory implements Closeable {
return null;
}
INodeFile fileNode = (INodeFile)inode;
oldReplication[0] = fileNode.getReplication();
final short oldRepl = fileNode.getReplication();
// check disk quota
long dsDelta = (replication - oldReplication[0]) *
(fileNode.diskspaceConsumed()/oldReplication[0]);
long dsDelta = (replication - oldRepl) * (fileNode.diskspaceConsumed()/oldRepl);
updateCount(inodes, inodes.length-1, 0, dsDelta, true);
fileNode.setReplication(replication);
if (oldReplication != null) {
oldReplication[0] = oldRepl;
}
return fileNode.getBlocks();
}
@ -1344,7 +1341,7 @@ public class FSDirectory implements Closeable {
* @throws QuotaExceededException if the new count violates any quota limit
* @throws FileNotFound if path does not exist.
*/
public void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
void updateSpaceConsumed(String path, long nsDelta, long dsDelta)
throws QuotaExceededException,
FileNotFoundException,
UnresolvedLinkException {
@ -2075,8 +2072,9 @@ public class FSDirectory implements Closeable {
size = fileNode.computeFileSize(true);
replication = fileNode.getReplication();
blocksize = fileNode.getPreferredBlockSize();
loc = getFSNamesystem().getBlockLocationsInternal(
fileNode, 0L, size, false);
loc = getFSNamesystem().getBlockManager().createLocatedBlocks(
fileNode.getBlocks(), fileNode.computeFileSize(false),
fileNode.isUnderConstruction(), 0L, size, false);
if (loc==null) {
loc = new LocatedBlocks();
}

View File

@ -18,10 +18,10 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.zip.Checksum;
import java.util.zip.CheckedOutputStream;
import java.util.SortedSet;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@ -29,28 +29,26 @@ import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Writable;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.security.token.delegation.DelegationKey;
import org.apache.hadoop.util.PureJavaCrc32;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableListMultimap;
import com.google.common.collect.Lists;
import com.google.common.collect.Multimaps;
import com.google.common.collect.Sets;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.*;
/**
@ -116,18 +114,6 @@ public class FSEditLog {
private NNStorage storage;
private static ThreadLocal<Checksum> localChecksum =
new ThreadLocal<Checksum>() {
protected Checksum initialValue() {
return new PureJavaCrc32();
}
};
/** Get a thread local checksum */
public static Checksum getChecksum() {
return localChecksum.get();
}
private static class TransactionId {
public long txid;
@ -148,15 +134,6 @@ public class FSEditLog {
this.storage = storage;
metrics = NameNode.getNameNodeMetrics();
lastPrintTime = now();
}
/**
* Initialize the list of edit journals
*/
synchronized void initJournals() {
assert journals.isEmpty();
Preconditions.checkState(state == State.UNINITIALIZED,
"Bad state: %s", state);
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
journals.add(new JournalAndStream(new FileJournalManager(sd)));
@ -174,8 +151,7 @@ public class FSEditLog {
* log segment.
*/
synchronized void open() throws IOException {
Preconditions.checkState(state == State.UNINITIALIZED);
initJournals();
Preconditions.checkState(state == State.BETWEEN_LOG_SEGMENTS);
startLogSegment(getLastWrittenTxId() + 1, true);
assert state == State.IN_SEGMENT : "Bad state: " + state;
@ -755,18 +731,64 @@ public class FSEditLog {
/**
* Return a manifest of what finalized edit logs are available
*/
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
throws IOException {
FSImageTransactionalStorageInspector inspector =
new FSImageTransactionalStorageInspector();
for (StorageDirectory sd : storage.dirIterable(NameNodeDirType.EDITS)) {
inspector.inspectDirectory(sd);
public synchronized RemoteEditLogManifest getEditLogManifest(
long fromTxId) throws IOException {
// Collect RemoteEditLogs available from each FileJournalManager
List<RemoteEditLog> allLogs = Lists.newArrayList();
for (JournalAndStream j : journals) {
if (j.getManager() instanceof FileJournalManager) {
FileJournalManager fjm = (FileJournalManager)j.getManager();
try {
allLogs.addAll(fjm.getRemoteEditLogs(fromTxId));
} catch (Throwable t) {
LOG.warn("Cannot list edit logs in " + fjm, t);
}
}
}
return inspector.getEditLogManifest(sinceTxId);
// Group logs by their starting txid
ImmutableListMultimap<Long, RemoteEditLog> logsByStartTxId =
Multimaps.index(allLogs, RemoteEditLog.GET_START_TXID);
long curStartTxId = fromTxId;
List<RemoteEditLog> logs = Lists.newArrayList();
while (true) {
ImmutableList<RemoteEditLog> logGroup = logsByStartTxId.get(curStartTxId);
if (logGroup.isEmpty()) {
// we have a gap in logs - for example because we recovered some old
// storage directory with ancient logs. Clear out any logs we've
// accumulated so far, and then skip to the next segment of logs
// after the gap.
SortedSet<Long> startTxIds = Sets.newTreeSet(logsByStartTxId.keySet());
startTxIds = startTxIds.tailSet(curStartTxId);
if (startTxIds.isEmpty()) {
break;
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Found gap in logs at " + curStartTxId + ": " +
"not returning previous logs in manifest.");
}
logs.clear();
curStartTxId = startTxIds.first();
continue;
}
}
// Find the one that extends the farthest forward
RemoteEditLog bestLog = Collections.max(logGroup);
logs.add(bestLog);
// And then start looking from after that point
curStartTxId = bestLog.getEndTxId() + 1;
}
RemoteEditLogManifest ret = new RemoteEditLogManifest(logs);
if (LOG.isDebugEnabled()) {
LOG.debug("Generated manifest for logs since " + fromTxId + ":"
+ ret);
}
return ret;
}
/**
* Finalizes the current edit log and opens a new log segment.
* @return the transaction id of the BEGIN_LOG_SEGMENT transaction
@ -877,8 +899,7 @@ public class FSEditLog {
/**
* Archive any log files that are older than the given txid.
*/
public void purgeLogsOlderThan(
final long minTxIdToKeep, final StoragePurger purger) {
public void purgeLogsOlderThan(final long minTxIdToKeep) {
synchronized (this) {
// synchronized to prevent findbugs warning about inconsistent
// synchronization. This will be JIT-ed out if asserts are
@ -892,7 +913,7 @@ public class FSEditLog {
mapJournalsAndReportErrors(new JournalClosure() {
@Override
public void apply(JournalAndStream jas) throws IOException {
jas.manager.purgeLogsOlderThan(minTxIdToKeep, purger);
jas.manager.purgeLogsOlderThan(minTxIdToKeep);
}
}, "purging logs older than " + minTxIdToKeep);
}
@ -1080,7 +1101,8 @@ public class FSEditLog {
stream = null;
}
private void abort() {
@VisibleForTesting
void abort() {
if (stream == null) return;
try {
stream.abort();

View File
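Illustrative sketch (hypothetical, not part of this patch): getEditLogManifest() above groups the available finalized segments by starting txid, keeps the one that extends farthest forward for each start, and restarts past any gap it finds. The same selection over a simplified Segment type:
import java.util.ArrayList;
import java.util.List;
import java.util.NavigableMap;
import java.util.TreeMap;
class ManifestSketch {
  static class Segment {                     // stand-in for RemoteEditLog
    final long start, end;
    Segment(long start, long end) { this.start = start; this.end = end; }
  }
  static List<Segment> manifest(List<Segment> all, long fromTxId) {
    // Keep, for each starting txid, the segment that reaches farthest forward.
    NavigableMap<Long, Segment> bestByStart = new TreeMap<Long, Segment>();
    for (Segment s : all) {
      Segment cur = bestByStart.get(s.start);
      if (cur == null || s.end > cur.end) {
        bestByStart.put(s.start, s);
      }
    }
    List<Segment> out = new ArrayList<Segment>();
    long next = fromTxId;
    while (true) {
      Segment s = bestByStart.get(next);
      if (s == null) {
        Long skipTo = bestByStart.ceilingKey(next);  // gap: drop what we had, jump past it
        if (skipTo == null) {
          break;
        }
        out.clear();
        next = skipTo;
        continue;
      }
      out.add(s);
      next = s.end + 1;                              // continue just after this segment
    }
    return out;
  }
}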

@ -19,15 +19,12 @@ package org.apache.hadoop.hdfs.server.namenode;
import static org.apache.hadoop.hdfs.server.common.Util.now;
import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.zip.Checksum;
import java.util.EnumMap;
import org.apache.hadoop.fs.permission.PermissionStatus;
import org.apache.hadoop.hdfs.protocol.FSConstants;
@ -37,8 +34,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfo;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockInfoUnderConstruction;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.LogHeader;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Reader;
import org.apache.hadoop.hdfs.server.namenode.EditLogFileInputStream.LogHeaderCorruptException;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.AddCloseOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.CancelDelegationTokenOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.ClearNSQuotaOp;
@ -60,6 +56,10 @@ import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.SymlinkOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.TimesOp;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.UpdateMasterKeyOp;
import org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease;
import org.apache.hadoop.hdfs.util.Holder;
import org.apache.hadoop.io.IOUtils;
import com.google.common.base.Joiner;
public class FSEditLogLoader {
private final FSNamesystem fsNamesys;
@ -84,49 +84,36 @@ public class FSEditLogLoader {
}
int loadFSEdits(EditLogInputStream edits, boolean closeOnExit,
long expectedStartingTxId)
throws IOException {
BufferedInputStream bin = new BufferedInputStream(edits);
DataInputStream in = new DataInputStream(bin);
long expectedStartingTxId)
throws IOException {
int numEdits = 0;
int logVersion = edits.getVersion();
try {
LogHeader header = LogHeader.read(in);
numEdits = loadEditRecords(
header.logVersion, in, header.checksum, false,
expectedStartingTxId);
numEdits = loadEditRecords(logVersion, edits, false,
expectedStartingTxId);
} finally {
if(closeOnExit)
in.close();
if(closeOnExit) {
edits.close();
}
}
return numEdits;
}
@SuppressWarnings("deprecation")
int loadEditRecords(int logVersion, DataInputStream in,
Checksum checksum, boolean closeOnExit,
int loadEditRecords(int logVersion, EditLogInputStream in, boolean closeOnExit,
long expectedStartingTxId)
throws IOException {
FSDirectory fsDir = fsNamesys.dir;
int numEdits = 0;
int numOpAdd = 0, numOpClose = 0, numOpDelete = 0,
numOpRenameOld = 0, numOpSetRepl = 0, numOpMkDir = 0,
numOpSetPerm = 0, numOpSetOwner = 0, numOpSetGenStamp = 0,
numOpTimes = 0, numOpRename = 0, numOpConcatDelete = 0,
numOpSymlink = 0, numOpGetDelegationToken = 0,
numOpRenewDelegationToken = 0, numOpCancelDelegationToken = 0,
numOpUpdateMasterKey = 0, numOpReassignLease = 0, numOpOther = 0;
EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts =
new EnumMap<FSEditLogOpCodes, Holder<Integer>>(FSEditLogOpCodes.class);
fsNamesys.writeLock();
fsDir.writeLock();
// Keep track of the file offsets of the last several opcodes.
// This is handy when manually recovering corrupted edits files.
PositionTrackingInputStream tracker = new PositionTrackingInputStream(in);
in = new DataInputStream(tracker);
long recentOpcodeOffsets[] = new long[4];
Arrays.fill(recentOpcodeOffsets, -1);
@ -134,12 +121,10 @@ public class FSEditLogLoader {
long txId = expectedStartingTxId - 1;
try {
FSEditLogOp.Reader reader = new FSEditLogOp.Reader(in, logVersion,
checksum);
FSEditLogOp op;
while ((op = reader.readOp()) != null) {
while ((op = in.readOp()) != null) {
recentOpcodeOffsets[numEdits % recentOpcodeOffsets.length] =
tracker.getPos();
in.getPosition();
if (LayoutVersion.supports(Feature.STORED_TXIDS, logVersion)) {
long thisTxId = op.txid;
if (thisTxId != txId + 1) {
@ -150,6 +135,7 @@ public class FSEditLogLoader {
}
numEdits++;
incrOpCount(op.opCode, opCounts);
switch (op.opCode) {
case OP_ADD:
case OP_CLOSE: {
@ -157,8 +143,8 @@ public class FSEditLogLoader {
// versions > 0 support per file replication
// get name and replication
short replication
= fsNamesys.adjustReplication(addCloseOp.replication);
final short replication = fsNamesys.getBlockManager(
).adjustReplication(addCloseOp.replication);
long blockSize = addCloseOp.blockSize;
BlockInfo blocks[] = new BlockInfo[addCloseOp.blocks.length];
@ -209,7 +195,6 @@ public class FSEditLogLoader {
blocks, replication,
addCloseOp.mtime, addCloseOp.atime, blockSize);
if (addCloseOp.opCode == FSEditLogOpCodes.OP_ADD) {
numOpAdd++;
//
// Replace current node with a INodeUnderConstruction.
// Recreate in-memory lease record.
@ -231,24 +216,20 @@ public class FSEditLogLoader {
break;
}
case OP_SET_REPLICATION: {
numOpSetRepl++;
SetReplicationOp setReplicationOp = (SetReplicationOp)op;
short replication
= fsNamesys.adjustReplication(setReplicationOp.replication);
short replication = fsNamesys.getBlockManager().adjustReplication(
setReplicationOp.replication);
fsDir.unprotectedSetReplication(setReplicationOp.path,
replication, null);
break;
}
case OP_CONCAT_DELETE: {
numOpConcatDelete++;
ConcatDeleteOp concatDeleteOp = (ConcatDeleteOp)op;
fsDir.unprotectedConcat(concatDeleteOp.trg, concatDeleteOp.srcs,
concatDeleteOp.timestamp);
break;
}
case OP_RENAME_OLD: {
numOpRenameOld++;
RenameOldOp renameOp = (RenameOldOp)op;
HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
fsDir.unprotectedRenameTo(renameOp.src, renameOp.dst,
@ -257,14 +238,11 @@ public class FSEditLogLoader {
break;
}
case OP_DELETE: {
numOpDelete++;
DeleteOp deleteOp = (DeleteOp)op;
fsDir.unprotectedDelete(deleteOp.path, deleteOp.timestamp);
break;
}
case OP_MKDIR: {
numOpMkDir++;
MkdirOp mkdirOp = (MkdirOp)op;
PermissionStatus permissions = fsNamesys.getUpgradePermission();
if (mkdirOp.permissions != null) {
@ -276,22 +254,17 @@ public class FSEditLogLoader {
break;
}
case OP_SET_GENSTAMP: {
numOpSetGenStamp++;
SetGenstampOp setGenstampOp = (SetGenstampOp)op;
fsNamesys.setGenerationStamp(setGenstampOp.genStamp);
break;
}
case OP_SET_PERMISSIONS: {
numOpSetPerm++;
SetPermissionsOp setPermissionsOp = (SetPermissionsOp)op;
fsDir.unprotectedSetPermission(setPermissionsOp.src,
setPermissionsOp.permissions);
break;
}
case OP_SET_OWNER: {
numOpSetOwner++;
SetOwnerOp setOwnerOp = (SetOwnerOp)op;
fsDir.unprotectedSetOwner(setOwnerOp.src, setOwnerOp.username,
setOwnerOp.groupname);
@ -320,7 +293,6 @@ public class FSEditLogLoader {
break;
case OP_TIMES: {
numOpTimes++;
TimesOp timesOp = (TimesOp)op;
fsDir.unprotectedSetTimes(timesOp.path,
@ -329,8 +301,6 @@ public class FSEditLogLoader {
break;
}
case OP_SYMLINK: {
numOpSymlink++;
SymlinkOp symlinkOp = (SymlinkOp)op;
fsDir.unprotectedSymlink(symlinkOp.path, symlinkOp.value,
symlinkOp.mtime, symlinkOp.atime,
@ -338,7 +308,6 @@ public class FSEditLogLoader {
break;
}
case OP_RENAME: {
numOpRename++;
RenameOp renameOp = (RenameOp)op;
HdfsFileStatus dinfo = fsDir.getFileInfo(renameOp.dst, false);
@ -348,7 +317,6 @@ public class FSEditLogLoader {
break;
}
case OP_GET_DELEGATION_TOKEN: {
numOpGetDelegationToken++;
GetDelegationTokenOp getDelegationTokenOp
= (GetDelegationTokenOp)op;
@ -358,8 +326,6 @@ public class FSEditLogLoader {
break;
}
case OP_RENEW_DELEGATION_TOKEN: {
numOpRenewDelegationToken++;
RenewDelegationTokenOp renewDelegationTokenOp
= (RenewDelegationTokenOp)op;
fsNamesys.getDelegationTokenSecretManager()
@ -368,8 +334,6 @@ public class FSEditLogLoader {
break;
}
case OP_CANCEL_DELEGATION_TOKEN: {
numOpCancelDelegationToken++;
CancelDelegationTokenOp cancelDelegationTokenOp
= (CancelDelegationTokenOp)op;
fsNamesys.getDelegationTokenSecretManager()
@ -378,14 +342,12 @@ public class FSEditLogLoader {
break;
}
case OP_UPDATE_MASTER_KEY: {
numOpUpdateMasterKey++;
UpdateMasterKeyOp updateMasterKeyOp = (UpdateMasterKeyOp)op;
fsNamesys.getDelegationTokenSecretManager()
.updatePersistedMasterKey(updateMasterKeyOp.key);
break;
}
case OP_REASSIGN_LEASE: {
numOpReassignLease++;
ReassignLeaseOp reassignLeaseOp = (ReassignLeaseOp)op;
Lease lease = fsNamesys.leaseManager.getLease(
@ -400,17 +362,16 @@ public class FSEditLogLoader {
case OP_START_LOG_SEGMENT:
case OP_END_LOG_SEGMENT: {
// no data in here currently.
numOpOther++;
break;
}
case OP_DATANODE_ADD:
case OP_DATANODE_REMOVE:
numOpOther++;
break;
default:
throw new IOException("Invalid operation read " + op.opCode);
}
}
} catch (IOException ex) {
check203UpgradeFailure(logVersion, ex);
} finally {
@ -421,7 +382,7 @@ public class FSEditLogLoader {
// Catch Throwable because in the case of a truly corrupt edits log, any
// sort of error might be thrown (NumberFormat, NullPointer, EOF, etc.)
StringBuilder sb = new StringBuilder();
sb.append("Error replaying edit log at offset " + tracker.getPos());
sb.append("Error replaying edit log at offset " + in.getPosition());
if (recentOpcodeOffsets[0] != -1) {
Arrays.sort(recentOpcodeOffsets);
sb.append("\nRecent opcode offsets:");
@ -439,26 +400,31 @@ public class FSEditLogLoader {
fsNamesys.writeUnlock();
}
if (FSImage.LOG.isDebugEnabled()) {
FSImage.LOG.debug("numOpAdd = " + numOpAdd + " numOpClose = " + numOpClose
+ " numOpDelete = " + numOpDelete
+ " numOpRenameOld = " + numOpRenameOld
+ " numOpSetRepl = " + numOpSetRepl + " numOpMkDir = " + numOpMkDir
+ " numOpSetPerm = " + numOpSetPerm
+ " numOpSetOwner = " + numOpSetOwner
+ " numOpSetGenStamp = " + numOpSetGenStamp
+ " numOpTimes = " + numOpTimes
+ " numOpConcatDelete = " + numOpConcatDelete
+ " numOpRename = " + numOpRename
+ " numOpGetDelegationToken = " + numOpGetDelegationToken
+ " numOpRenewDelegationToken = " + numOpRenewDelegationToken
+ " numOpCancelDelegationToken = " + numOpCancelDelegationToken
+ " numOpUpdateMasterKey = " + numOpUpdateMasterKey
+ " numOpReassignLease = " + numOpReassignLease
+ " numOpOther = " + numOpOther);
dumpOpCounts(opCounts);
}
return numEdits;
}
private static void dumpOpCounts(
EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
StringBuilder sb = new StringBuilder();
sb.append("Summary of operations loaded from edit log:\n ");
Joiner.on("\n ").withKeyValueSeparator("=").appendTo(sb, opCounts);
FSImage.LOG.debug(sb.toString());
}
private void incrOpCount(FSEditLogOpCodes opCode,
EnumMap<FSEditLogOpCodes, Holder<Integer>> opCounts) {
Holder<Integer> holder = opCounts.get(opCode);
if (holder == null) {
holder = new Holder<Integer>(1);
opCounts.put(opCode, holder);
} else {
holder.held++;
}
}
/**
* Throw appropriate exception during upgrade from 203, when editlog loading
* could fail due to opcode conflicts.
@ -480,49 +446,50 @@ public class FSEditLogLoader {
}
}
static EditLogValidation validateEditLog(File file) throws IOException {
EditLogFileInputStream in;
try {
in = new EditLogFileInputStream(file);
} catch (LogHeaderCorruptException corrupt) {
// If it's missing its header, this is equivalent to no transactions
FSImage.LOG.warn("Log at " + file + " has no valid header",
corrupt);
return new EditLogValidation(0, 0);
}
try {
return validateEditLog(in);
} finally {
IOUtils.closeStream(in);
}
}
/**
* Return the number of valid transactions in the file. If the file is
* Return the number of valid transactions in the stream. If the stream is
* truncated during the header, returns a value indicating that there are
* 0 valid transactions.
* @throws IOException if the file cannot be read due to an IO error (eg
* 0 valid transactions. This reads through the stream but does not close
* it.
* @throws IOException if the stream cannot be read due to an IO error (eg
* if the log does not exist)
*/
static EditLogValidation validateEditLog(File f) throws IOException {
FileInputStream fis = new FileInputStream(f);
static EditLogValidation validateEditLog(EditLogInputStream in) {
long numValid = 0;
long lastPos = 0;
try {
PositionTrackingInputStream tracker = new PositionTrackingInputStream(
new BufferedInputStream(fis));
DataInputStream dis = new DataInputStream(tracker);
LogHeader header;
try {
header = LogHeader.read(dis);
} catch (Throwable t) {
FSImage.LOG.debug("Unable to read header from " + f +
" -> no valid transactions in this file.");
return new EditLogValidation(0, 0);
}
Reader reader = new FSEditLogOp.Reader(dis, header.logVersion, header.checksum);
long numValid = 0;
long lastPos = 0;
try {
while (true) {
lastPos = tracker.getPos();
if (reader.readOp() == null) {
break;
}
numValid++;
while (true) {
lastPos = in.getPosition();
if (in.readOp() == null) {
break;
}
} catch (Throwable t) {
// Catch Throwable and not just IOE, since bad edits may generate
// NumberFormatExceptions, AssertionErrors, OutOfMemoryErrors, etc.
FSImage.LOG.debug("Caught exception after reading " + numValid +
" ops from " + f + " while determining its valid length.", t);
numValid++;
}
return new EditLogValidation(lastPos, numValid);
} finally {
fis.close();
} catch (Throwable t) {
// Catch Throwable and not just IOE, since bad edits may generate
// NumberFormatExceptions, AssertionErrors, OutOfMemoryErrors, etc.
FSImage.LOG.debug("Caught exception after reading " + numValid +
" ops from " + in + " while determining its valid length.", t);
}
return new EditLogValidation(lastPos, numValid);
}
static class EditLogValidation {
@@ -536,9 +503,9 @@ public class FSEditLogLoader {
}
/**
* Stream wrapper that keeps track of the current file position.
* Stream wrapper that keeps track of the current stream position.
*/
private static class PositionTrackingInputStream extends FilterInputStream {
static class PositionTrackingInputStream extends FilterInputStream {
private long curPos = 0;
private long markPos = -1;
@@ -582,4 +549,5 @@ public class FSEditLogLoader {
return curPos;
}
}
}
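
For context on the class kept above: a PositionTrackingInputStream only has to intercept the read, skip, mark and reset calls to keep its byte offset current. The following is an illustrative stand-alone sketch of that bookkeeping, not the class from this patch; the curPos/markPos fields and getPos() accessor mirror what the diff shows, the rest is assumed.

import java.io.ByteArrayInputStream;
import java.io.FilterInputStream;
import java.io.IOException;
import java.io.InputStream;

// Illustrative stream wrapper that tracks how many bytes have been consumed.
public class TrackingInputStream extends FilterInputStream {
  private long curPos = 0;
  private long markPos = -1;

  public TrackingInputStream(InputStream in) {
    super(in);
  }

  @Override
  public int read() throws IOException {
    int b = super.read();
    if (b != -1) curPos++;          // one byte consumed
    return b;
  }

  @Override
  public int read(byte[] buf, int off, int len) throws IOException {
    int n = super.read(buf, off, len);
    if (n > 0) curPos += n;         // n bytes consumed
    return n;
  }

  @Override
  public long skip(long n) throws IOException {
    long skipped = super.skip(n);
    curPos += skipped;
    return skipped;
  }

  @Override
  public synchronized void mark(int readlimit) {
    super.mark(readlimit);
    markPos = curPos;               // remember where mark() was taken
  }

  @Override
  public synchronized void reset() throws IOException {
    super.reset();
    curPos = markPos;               // rewind the tracked position too
  }

  public long getPos() {
    return curPos;
  }

  public static void main(String[] args) throws IOException {
    TrackingInputStream in = new TrackingInputStream(
        new ByteArrayInputStream("hello world".getBytes("UTF-8")));
    in.read(new byte[5], 0, 5);
    System.out.println("position after 5 bytes: " + in.getPos());  // -> 5
  }
}

Tracking the position this way is what lets the loader report "Error replaying edit log at offset N" and record recent opcode offsets without re-reading the stream.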

View File

@@ -36,6 +36,7 @@ import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.common.GenerationStamp;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.util.PureJavaCrc32;
import static org.apache.hadoop.hdfs.server.namenode.FSEditLogOpCodes.*;
import org.apache.hadoop.security.token.delegation.DelegationKey;
@@ -1323,71 +1324,17 @@ public abstract class FSEditLogOp {
return longWritable.get();
}
}
/**
* Class to encapsulate the header at the top of a log file.
*/
static class LogHeader {
final int logVersion;
final Checksum checksum;
public LogHeader(int logVersion, Checksum checksum) {
this.logVersion = logVersion;
this.checksum = checksum;
}
static LogHeader read(DataInputStream in) throws IOException {
int logVersion = 0;
logVersion = FSEditLogOp.LogHeader.readLogVersion(in);
Checksum checksum = null;
if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
checksum = FSEditLog.getChecksum();
}
return new LogHeader(logVersion, checksum);
}
/**
* Read the header of fsedit log
* @param in fsedit stream
* @return the edit log version number
* @throws IOException if error occurs
*/
private static int readLogVersion(DataInputStream in) throws IOException {
int logVersion = 0;
// Read log file version. Could be missing.
in.mark(4);
// If edits log is greater than 2G, available method will return negative
// numbers, so we avoid having to call available
boolean available = true;
try {
logVersion = in.readByte();
} catch (EOFException e) {
available = false;
}
if (available) {
in.reset();
logVersion = in.readInt();
if (logVersion < FSConstants.LAYOUT_VERSION) // future version
throw new IOException(
"Unexpected version of the file system log file: "
+ logVersion + ". Current version = "
+ FSConstants.LAYOUT_VERSION + ".");
}
assert logVersion <= Storage.LAST_UPGRADABLE_LAYOUT_VERSION :
"Unsupported version " + logVersion;
return logVersion;
}
}
/**
* Class for writing editlog ops
*/
public static class Writer {
private final DataOutputBuffer buf;
private final Checksum checksum;
public Writer(DataOutputBuffer out) {
this.buf = out;
this.checksum = new PureJavaCrc32();
}
/**
@@ -1402,7 +1349,6 @@ public abstract class FSEditLogOp {
buf.writeLong(op.txid);
op.writeFields(buf);
int end = buf.getLength();
Checksum checksum = FSEditLog.getChecksum();
checksum.reset();
checksum.update(buf.getData(), start, end-start);
int sum = (int)checksum.getValue();
@@ -1422,19 +1368,22 @@ public abstract class FSEditLogOp {
* Construct the reader
* @param in The stream to read from.
* @param logVersion The version of the data coming from the stream.
* @param checksum Checksum being used with input stream.
*/
@SuppressWarnings("deprecation")
public Reader(DataInputStream in, int logVersion,
Checksum checksum) {
if (checksum != null) {
public Reader(DataInputStream in, int logVersion) {
this.logVersion = logVersion;
if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
this.checksum = new PureJavaCrc32();
} else {
this.checksum = null;
}
if (this.checksum != null) {
this.in = new DataInputStream(
new CheckedInputStream(in, checksum));
new CheckedInputStream(in, this.checksum));
} else {
this.in = in;
}
this.logVersion = logVersion;
this.checksum = checksum;
}
/**
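
As a rough illustration of the checksum wiring the new Reader constructor sets up: the op stream is wrapped in a java.util.zip.CheckedInputStream so that every byte read feeds the checksum, and the accumulated value can then be compared against the CRC the writer appended after each record. The sketch below is not the Hadoop code; it substitutes the JDK's CRC32 for Hadoop's PureJavaCrc32 and invents a trivial record layout purely to show the pattern.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.CRC32;
import java.util.zip.CheckedInputStream;

public class ChecksumReadSketch {
  public static void main(String[] args) throws IOException {
    // Writer side: serialize a body, then append its 4-byte CRC.
    byte[] payload = "example-record".getBytes("UTF-8");
    CRC32 writerSum = new CRC32();
    writerSum.update(payload, 0, payload.length);

    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    DataOutputStream dos = new DataOutputStream(bos);
    dos.write(payload);
    dos.writeInt((int) writerSum.getValue());   // checksum trails the body

    // Reader side: every byte pulled through the CheckedInputStream
    // updates 'readerSum' automatically.
    CRC32 readerSum = new CRC32();
    DataInputStream in = new DataInputStream(
        new CheckedInputStream(new ByteArrayInputStream(bos.toByteArray()), readerSum));
    byte[] body = new byte[payload.length];
    in.readFully(body);
    int expected = (int) readerSum.getValue();  // snapshot before reading the stored CRC
    int stored = in.readInt();
    if (stored != expected) {
      throw new IOException("Checksum error: stored=" + stored + " computed=" + expected);
    }
    System.out.println("record verified");
  }
}

The snapshot of the computed value has to be taken before the trailing CRC itself is read, since that read would also flow through the checking stream.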

View File

@@ -137,10 +137,6 @@ public class FSImage implements Closeable {
FSImage.getCheckpointEditsDirs(conf, null));
storage = new NNStorage(conf, imageDirs, editsDirs);
if (ns != null) {
storage.setUpgradeManager(ns.upgradeManager);
}
if(conf.getBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY,
DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_DEFAULT)) {
storage.setRestoreFailedStorage(true);

View File

@@ -330,7 +330,7 @@ class FSImageFormat {
int imgVersion = getLayoutVersion();
short replication = in.readShort();
replication = namesystem.adjustReplication(replication);
replication = namesystem.getBlockManager().adjustReplication(replication);
modificationTime = in.readLong();
if (LayoutVersion.supports(Feature.FILE_ACCESS_TIME, imgVersion)) {
atime = in.readLong();

View File

@@ -96,4 +96,36 @@ abstract class FSImageStorageInspector {
return sb.toString();
}
}
/**
* Record of an image that has been located and had its filename parsed.
*/
static class FSImageFile {
final StorageDirectory sd;
final long txId;
private final File file;
FSImageFile(StorageDirectory sd, File file, long txId) {
assert txId >= 0 : "Invalid txid on " + file +": " + txId;
this.sd = sd;
this.txId = txId;
this.file = file;
}
File getFile() {
return file;
}
public long getCheckpointTxId() {
return txId;
}
@Override
public String toString() {
return String.format("FSImageFile(file=%s, cpktTxId=%019d)",
file.toString(), txId);
}
}
}
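
The %019d format used in FSImageFile.toString() zero-pads transaction IDs to 19 digits, which is wide enough for any positive long and keeps lexicographic and numeric ordering of names in agreement. A one-line illustration (the "fsimage_" prefix here is just an example value):

public class TxIdFormatSketch {
  public static void main(String[] args) {
    long txId = 42;
    // 19 digits covers the full positive range of a long (~9.2e18).
    System.out.println(String.format("fsimage_%019d", txId));
    // -> fsimage_0000000000000000042
  }
}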

View File

@@ -37,9 +37,9 @@ import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;
@@ -54,17 +54,13 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
private boolean needToSave = false;
private boolean isUpgradeFinalized = true;
List<FoundFSImage> foundImages = new ArrayList<FoundFSImage>();
List<FoundEditLog> foundEditLogs = new ArrayList<FoundEditLog>();
List<FSImageFile> foundImages = new ArrayList<FSImageFile>();
List<EditLogFile> foundEditLogs = new ArrayList<EditLogFile>();
SortedMap<Long, LogGroup> logGroups = new TreeMap<Long, LogGroup>();
long maxSeenTxId = 0;
private static final Pattern IMAGE_REGEX = Pattern.compile(
NameNodeFile.IMAGE.getName() + "_(\\d+)");
private static final Pattern EDITS_REGEX = Pattern.compile(
NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
@Override
public void inspectDirectory(StorageDirectory sd) throws IOException {
@@ -95,7 +91,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
if (sd.getStorageDirType().isOfType(NameNodeDirType.IMAGE)) {
try {
long txid = Long.valueOf(imageMatch.group(1));
foundImages.add(new FoundFSImage(sd, f, txid));
foundImages.add(new FSImageFile(sd, f, txid));
} catch (NumberFormatException nfe) {
LOG.error("Image file " + f + " has improperly formatted " +
"transaction ID");
@@ -117,9 +113,10 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
LOG.warn("Unable to determine the max transaction ID seen by " + sd, ioe);
}
List<FoundEditLog> editLogs = matchEditLogs(filesInStorage);
List<EditLogFile> editLogs
= FileJournalManager.matchEditLogs(filesInStorage);
if (sd.getStorageDirType().isOfType(NameNodeDirType.EDITS)) {
for (FoundEditLog log : editLogs) {
for (EditLogFile log : editLogs) {
addEditLog(log);
}
} else if (!editLogs.isEmpty()){
@@ -133,47 +130,12 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
isUpgradeFinalized = isUpgradeFinalized && !sd.getPreviousDir().exists();
}
static List<FoundEditLog> matchEditLogs(File[] filesInStorage) {
List<FoundEditLog> ret = Lists.newArrayList();
for (File f : filesInStorage) {
String name = f.getName();
// Check for edits
Matcher editsMatch = EDITS_REGEX.matcher(name);
if (editsMatch.matches()) {
try {
long startTxId = Long.valueOf(editsMatch.group(1));
long endTxId = Long.valueOf(editsMatch.group(2));
ret.add(new FoundEditLog(f, startTxId, endTxId));
} catch (NumberFormatException nfe) {
LOG.error("Edits file " + f + " has improperly formatted " +
"transaction ID");
// skip
}
}
// Check for in-progress edits
Matcher inProgressEditsMatch = EDITS_INPROGRESS_REGEX.matcher(name);
if (inProgressEditsMatch.matches()) {
try {
long startTxId = Long.valueOf(inProgressEditsMatch.group(1));
ret.add(
new FoundEditLog(f, startTxId, FoundEditLog.UNKNOWN_END));
} catch (NumberFormatException nfe) {
LOG.error("In-progress edits file " + f + " has improperly " +
"formatted transaction ID");
// skip
}
}
}
return ret;
}
private void addEditLog(FoundEditLog foundEditLog) {
private void addEditLog(EditLogFile foundEditLog) {
foundEditLogs.add(foundEditLog);
LogGroup group = logGroups.get(foundEditLog.startTxId);
LogGroup group = logGroups.get(foundEditLog.getFirstTxId());
if (group == null) {
group = new LogGroup(foundEditLog.startTxId);
logGroups.put(foundEditLog.startTxId, group);
group = new LogGroup(foundEditLog.getFirstTxId());
logGroups.put(foundEditLog.getFirstTxId(), group);
}
group.add(foundEditLog);
}
@@ -191,9 +153,9 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
*
* Returns null if no images were found.
*/
FoundFSImage getLatestImage() {
FoundFSImage ret = null;
for (FoundFSImage img : foundImages) {
FSImageFile getLatestImage() {
FSImageFile ret = null;
for (FSImageFile img : foundImages) {
if (ret == null || img.txId > ret.txId) {
ret = img;
}
@@ -201,11 +163,11 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
return ret;
}
public List<FoundFSImage> getFoundImages() {
public List<FSImageFile> getFoundImages() {
return ImmutableList.copyOf(foundImages);
}
public List<FoundEditLog> getFoundEditLogs() {
public List<EditLogFile> getEditLogFiles() {
return ImmutableList.copyOf(foundEditLogs);
}
@@ -215,7 +177,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
throw new FileNotFoundException("No valid image files found");
}
FoundFSImage recoveryImage = getLatestImage();
FSImageFile recoveryImage = getLatestImage();
LogLoadPlan logPlan = createLogLoadPlan(recoveryImage.txId, Long.MAX_VALUE);
return new TransactionalLoadPlan(recoveryImage,
@@ -233,7 +195,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
LogLoadPlan createLogLoadPlan(long sinceTxId, long maxStartTxId) throws IOException {
long expectedTxId = sinceTxId + 1;
List<FoundEditLog> recoveryLogs = new ArrayList<FoundEditLog>();
List<EditLogFile> recoveryLogs = new ArrayList<EditLogFile>();
SortedMap<Long, LogGroup> tailGroups = logGroups.tailMap(expectedTxId);
if (logGroups.size() > tailGroups.size()) {
@@ -306,22 +268,6 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
return needToSave;
}
RemoteEditLogManifest getEditLogManifest(long sinceTxId) {
List<RemoteEditLog> logs = Lists.newArrayList();
for (LogGroup g : logGroups.values()) {
if (!g.hasFinalized) continue;
FoundEditLog fel = g.getBestNonCorruptLog();
if (fel.getLastTxId() < sinceTxId) continue;
logs.add(new RemoteEditLog(fel.getStartTxId(),
fel.getLastTxId()));
}
return new RemoteEditLogManifest(logs);
}
/**
* A group of logs that all start at the same txid.
*
@@ -330,7 +276,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
*/
static class LogGroup {
long startTxId;
List<FoundEditLog> logs = new ArrayList<FoundEditLog>();;
List<EditLogFile> logs = new ArrayList<EditLogFile>();;
private Set<Long> endTxIds = new TreeSet<Long>();
private boolean hasInProgress = false;
private boolean hasFinalized = false;
@@ -339,15 +285,15 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
this.startTxId = startTxId;
}
FoundEditLog getBestNonCorruptLog() {
EditLogFile getBestNonCorruptLog() {
// First look for non-corrupt finalized logs
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
if (!log.isCorrupt() && !log.isInProgress()) {
return log;
}
}
// Then look for non-corrupt in-progress logs
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
if (!log.isCorrupt()) {
return log;
}
@@ -364,7 +310,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
* @return true if we can determine the last txid in this log group.
*/
boolean hasKnownLastTxId() {
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
if (!log.isInProgress()) {
return true;
}
@@ -378,24 +324,24 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
* {@see #hasKnownLastTxId()}
*/
long getLastTxId() {
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
if (!log.isInProgress()) {
return log.lastTxId;
return log.getLastTxId();
}
}
throw new IllegalStateException("LogGroup only has in-progress logs");
}
void add(FoundEditLog log) {
assert log.getStartTxId() == startTxId;
void add(EditLogFile log) {
assert log.getFirstTxId() == startTxId;
logs.add(log);
if (log.isInProgress()) {
hasInProgress = true;
} else {
hasFinalized = true;
endTxIds.add(log.lastTxId);
endTxIds.add(log.getLastTxId());
}
}
@@ -422,7 +368,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
* The in-progress logs in this case should be considered corrupt.
*/
private void planMixedLogRecovery() throws IOException {
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
if (log.isInProgress()) {
LOG.warn("Log at " + log.getFile() + " is in progress, but " +
"other logs starting at the same txid " + startTxId +
@@ -446,7 +392,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
"crash)");
if (logs.size() == 1) {
// Only one log, it's our only choice!
FoundEditLog log = logs.get(0);
EditLogFile log = logs.get(0);
if (log.validateLog().numTransactions == 0) {
// If it has no transactions, we should consider it corrupt just
// to be conservative.
@@ -459,7 +405,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
}
long maxValidTxnCount = Long.MIN_VALUE;
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
long validTxnCount = log.validateLog().numTransactions;
LOG.warn(" Log " + log.getFile() +
" valid txns=" + validTxnCount +
@@ -467,7 +413,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
maxValidTxnCount = Math.max(maxValidTxnCount, validTxnCount);
}
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
long txns = log.validateLog().numTransactions;
if (txns < maxValidTxnCount) {
LOG.warn("Marking log at " + log.getFile() + " as corrupt since " +
@@ -499,7 +445,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
}
void recover() throws IOException {
for (FoundEditLog log : logs) {
for (EditLogFile log : logs) {
if (log.isCorrupt()) {
log.moveAsideCorruptFile();
} else if (log.isInProgress()) {
@@ -508,131 +454,12 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
}
}
}
/**
* Record of an image that has been located and had its filename parsed.
*/
static class FoundFSImage {
final StorageDirectory sd;
final long txId;
private final File file;
FoundFSImage(StorageDirectory sd, File file, long txId) {
assert txId >= 0 : "Invalid txid on " + file +": " + txId;
this.sd = sd;
this.txId = txId;
this.file = file;
}
File getFile() {
return file;
}
public long getTxId() {
return txId;
}
@Override
public String toString() {
return file.toString();
}
}
/**
* Record of an edit log that has been located and had its filename parsed.
*/
static class FoundEditLog {
File file;
final long startTxId;
long lastTxId;
private EditLogValidation cachedValidation = null;
private boolean isCorrupt = false;
static final long UNKNOWN_END = -1;
FoundEditLog(File file,
long startTxId, long endTxId) {
assert endTxId == UNKNOWN_END || endTxId >= startTxId;
assert startTxId > 0;
assert file != null;
this.startTxId = startTxId;
this.lastTxId = endTxId;
this.file = file;
}
public void finalizeLog() throws IOException {
long numTransactions = validateLog().numTransactions;
long lastTxId = startTxId + numTransactions - 1;
File dst = new File(file.getParentFile(),
NNStorage.getFinalizedEditsFileName(startTxId, lastTxId));
LOG.info("Finalizing edits log " + file + " by renaming to "
+ dst.getName());
if (!file.renameTo(dst)) {
throw new IOException("Couldn't finalize log " +
file + " to " + dst);
}
this.lastTxId = lastTxId;
file = dst;
}
long getStartTxId() {
return startTxId;
}
long getLastTxId() {
return lastTxId;
}
EditLogValidation validateLog() throws IOException {
if (cachedValidation == null) {
cachedValidation = FSEditLogLoader.validateEditLog(file);
}
return cachedValidation;
}
boolean isInProgress() {
return (lastTxId == UNKNOWN_END);
}
File getFile() {
return file;
}
void markCorrupt() {
isCorrupt = true;
}
boolean isCorrupt() {
return isCorrupt;
}
void moveAsideCorruptFile() throws IOException {
assert isCorrupt;
File src = file;
File dst = new File(src.getParent(), src.getName() + ".corrupt");
boolean success = src.renameTo(dst);
if (!success) {
throw new IOException(
"Couldn't rename corrupt log " + src + " to " + dst);
}
file = dst;
}
@Override
public String toString() {
return file.toString();
}
}
static class TransactionalLoadPlan extends LoadPlan {
final FoundFSImage image;
final FSImageFile image;
final LogLoadPlan logPlan;
public TransactionalLoadPlan(FoundFSImage image,
public TransactionalLoadPlan(FSImageFile image,
LogLoadPlan logPlan) {
super();
this.image = image;
@@ -662,10 +489,10 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
}
static class LogLoadPlan {
final List<FoundEditLog> editLogs;
final List<EditLogFile> editLogs;
final List<LogGroup> logGroupsToRecover;
LogLoadPlan(List<FoundEditLog> editLogs,
LogLoadPlan(List<EditLogFile> editLogs,
List<LogGroup> logGroupsToRecover) {
this.editLogs = editLogs;
this.logGroupsToRecover = logGroupsToRecover;
@ -679,7 +506,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
public List<File> getEditsFiles() {
List<File> ret = new ArrayList<File>();
for (FoundEditLog log : editLogs) {
for (EditLogFile log : editLogs) {
ret.add(log.getFile());
}
return ret;
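
The recovery planning the inspector does over a LogGroup boils down to: among several copies of the same log segment, keep the one(s) with the most readable transactions and mark shorter copies corrupt. A minimal stand-alone sketch of that selection rule, using a hypothetical Candidate record in place of EditLogFile and hard-coded transaction counts, is:

import java.util.Arrays;
import java.util.List;

public class LogSelectionSketch {
  // Hypothetical stand-in for EditLogFile: a name plus the valid txn count
  // that validateLog() would have reported.
  static final class Candidate {
    final String name;
    final long validTxns;
    boolean corrupt = false;
    Candidate(String name, long validTxns) {
      this.name = name;
      this.validTxns = validTxns;
    }
  }

  static void planRecovery(List<Candidate> copies) {
    long maxValid = Long.MIN_VALUE;
    for (Candidate c : copies) {
      maxValid = Math.max(maxValid, c.validTxns);
    }
    // Any copy with fewer readable transactions than the best one is
    // treated as a truncated duplicate and marked corrupt.
    for (Candidate c : copies) {
      if (c.validTxns < maxValid) {
        c.corrupt = true;
      }
    }
  }

  public static void main(String[] args) {
    List<Candidate> copies = Arrays.asList(
        new Candidate("edits_inprogress_1 (dir A)", 120),
        new Candidate("edits_inprogress_1 (dir B)", 97));
    planRecovery(copies);
    for (Candidate c : copies) {
      System.out.println(c.name + " corrupt=" + c.corrupt);
    }
  }
}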

View File

@@ -21,6 +21,7 @@ import java.io.IOException;
import java.io.PrintWriter;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import javax.net.SocketFactory;
import javax.servlet.ServletContext;
@@ -36,11 +37,14 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.server.common.HdfsConstants;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
import org.znerd.xmlenc.XMLOutputter;
/** Servlets for file checksum */
@@ -52,6 +56,32 @@ public class FileChecksumServlets {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
/** Create a redirection URL */
private URL createRedirectURL(UserGroupInformation ugi, DatanodeID host,
HttpServletRequest request, NameNode nn)
throws IOException {
final String hostname = host instanceof DatanodeInfo
? ((DatanodeInfo)host).getHostName() : host.getHost();
final String scheme = request.getScheme();
final int port = "https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")
: host.getInfoPort();
final String encodedPath = ServletUtil.getRawPath(request, "/fileChecksum");
String dtParam = "";
if (UserGroupInformation.isSecurityEnabled()) {
String tokenString = ugi.getTokens().iterator().next().encodeToUrlString();
dtParam = JspHelper.getDelegationTokenUrlParam(tokenString);
}
String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
return new URL(scheme, hostname, port,
"/getFileChecksum" + encodedPath + '?' +
"ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
dtParam + addrParam);
}
/** {@inheritDoc} */
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
@@ -62,12 +92,8 @@ public class FileChecksumServlets {
context);
final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
try {
final URI uri = createRedirectUri("/getFileChecksum", ugi, datanode,
request, namenode);
response.sendRedirect(uri.toURL().toString());
} catch(URISyntaxException e) {
throw new ServletException(e);
//response.getWriter().println(e.toString());
response.sendRedirect(
createRedirectURL(ugi, datanode, request, namenode).toString());
} catch (IOException e) {
response.sendError(400, e.getMessage());
}
@@ -84,7 +110,7 @@ public class FileChecksumServlets {
public void doGet(HttpServletRequest request, HttpServletResponse response
) throws ServletException, IOException {
final PrintWriter out = response.getWriter();
final String filename = getFilename(request, response);
final String path = ServletUtil.getDecodedPath(request, "/getFileChecksum");
final XMLOutputter xml = new XMLOutputter(out, "UTF-8");
xml.declaration();
@@ -103,12 +129,12 @@ public class FileChecksumServlets {
datanode, conf, getUGI(request, conf));
final ClientProtocol nnproxy = dfs.getNamenode();
final MD5MD5CRC32FileChecksum checksum = DFSClient.getFileChecksum(
filename, nnproxy, socketFactory, socketTimeout);
path, nnproxy, socketFactory, socketTimeout);
MD5MD5CRC32FileChecksum.write(xml, checksum);
} catch(IOException ioe) {
writeXml(ioe, filename, xml);
writeXml(ioe, path, xml);
} catch (InterruptedException e) {
writeXml(e, filename, xml);
writeXml(e, path, xml);
}
xml.endDocument();
}
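
The redirect servlets in this patch switch from piecing together a URI to building a java.net.URL directly from scheme, host, port, and a pre-encoded path-plus-query string. A tiny example of that constructor, with placeholder host, port and path values that are not taken from the patch:

import java.net.MalformedURLException;
import java.net.URL;

public class RedirectUrlSketch {
  public static void main(String[] args) throws MalformedURLException {
    String scheme = "http";
    String hostname = "datanode.example.com";   // placeholder
    int port = 50075;                           // placeholder info port
    String encodedPath = "/user/alice/report%201.txt";  // already percent-encoded

    // URL(protocol, host, port, file): 'file' carries both path and query,
    // so the caller is responsible for encoding it up front.
    URL redirect = new URL(scheme, hostname, port,
        "/getFileChecksum" + encodedPath + '?' + "ugi=alice");
    System.out.println(redirect);
    // -> http://datanode.example.com:50075/getFileChecksum/user/alice/report%201.txt?ugi=alice
  }
}

Passing the raw (still-encoded) path through, rather than a decoded one, is what keeps characters such as spaces intact in the redirect target.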

View File

@@ -18,8 +18,8 @@
package org.apache.hadoop.hdfs.server.namenode;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URL;
import java.security.PrivilegedExceptionAction;
import javax.servlet.http.HttpServletRequest;
@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
/** Redirect queries about the hosted filesystem to an appropriate datanode.
* @see org.apache.hadoop.hdfs.HftpFileSystem
@@ -44,22 +45,25 @@ public class FileDataServlet extends DfsServlet {
/** For java.io.Serializable */
private static final long serialVersionUID = 1L;
/** Create a redirection URI */
protected URI createUri(String parent, HdfsFileStatus i, UserGroupInformation ugi,
ClientProtocol nnproxy, HttpServletRequest request, String dt)
throws IOException, URISyntaxException {
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status,
UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
throws IOException {
String scheme = request.getScheme();
final LocatedBlocks blks = nnproxy.getBlockLocations(
i.getFullPath(new Path(parent)).toUri().getPath(), 0, 1);
final DatanodeID host = pickSrcDatanode(blks, i);
status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
final DatanodeID host = pickSrcDatanode(blks, status);
final String hostname;
if (host instanceof DatanodeInfo) {
hostname = ((DatanodeInfo)host).getHostName();
} else {
hostname = host.getHost();
}
String dtParam="";
final int port = "https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")
: host.getInfoPort();
String dtParam = "";
if (dt != null) {
dtParam=JspHelper.getDelegationTokenUrlParam(dt);
}
@@ -70,12 +74,10 @@ public class FileDataServlet extends DfsServlet {
String addr = NameNode.getHostPortString(nn.getNameNodeAddress());
String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);
return new URI(scheme, null, hostname,
"https".equals(scheme)
? (Integer)getServletContext().getAttribute("datanode.https.port")
: host.getInfoPort(),
"/streamFile" + i.getFullName(parent),
"ugi=" + ugi.getShortUserName() + dtParam + addrParam, null);
return new URL(scheme, hostname, port,
"/streamFile" + encodedPath + '?' +
"ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
dtParam + addrParam);
}
/** Select a datanode to service this request.
@@ -112,20 +114,15 @@ public class FileDataServlet extends DfsServlet {
@Override
public Void run() throws IOException {
ClientProtocol nn = createNameNodeProxy();
final String path = request.getPathInfo() != null ? request
.getPathInfo() : "/";
final String path = ServletUtil.getDecodedPath(request, "/data");
final String encodedPath = ServletUtil.getRawPath(request, "/data");
String delegationToken = request
.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
HdfsFileStatus info = nn.getFileInfo(path);
if (info != null && !info.isDir()) {
try {
response.sendRedirect(createUri(path, info, ugi, nn, request,
delegationToken).toURL().toString());
} catch (URISyntaxException e) {
response.getWriter().println(e.toString());
}
response.sendRedirect(createRedirectURL(path, encodedPath,
info, ugi, nn, request, delegationToken).toString());
} else if (info == null) {
response.sendError(400, "File not found " + path);
} else {

View File

@@ -23,14 +23,21 @@ import org.apache.commons.logging.LogFactory;
import java.io.File;
import java.io.IOException;
import java.util.List;
import java.util.Comparator;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import org.apache.hadoop.fs.FileUtil;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
import org.apache.hadoop.hdfs.server.namenode.FSEditLogLoader.EditLogValidation;
import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeFile;
import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import com.google.common.collect.Lists;
import com.google.common.collect.ComparisonChain;
/**
* Journal manager for the common case of edits files being written
@@ -45,6 +52,15 @@ class FileJournalManager implements JournalManager {
private final StorageDirectory sd;
private int outputBufferCapacity = 512*1024;
private static final Pattern EDITS_REGEX = Pattern.compile(
NameNodeFile.EDITS.getName() + "_(\\d+)-(\\d+)");
private static final Pattern EDITS_INPROGRESS_REGEX = Pattern.compile(
NameNodeFile.EDITS_INPROGRESS.getName() + "_(\\d+)");
@VisibleForTesting
StoragePurger purger
= new NNStorageRetentionManager.DeletionStoragePurger();
public FileJournalManager(StorageDirectory sd) {
this.sd = sd;
}
@@ -91,13 +107,13 @@ class FileJournalManager implements JournalManager {
}
@Override
public void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
public void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException {
File[] files = FileUtil.listFiles(sd.getCurrentDir());
List<FoundEditLog> editLogs =
FSImageTransactionalStorageInspector.matchEditLogs(files);
for (FoundEditLog log : editLogs) {
if (log.getStartTxId() < minTxIdToKeep &&
List<EditLogFile> editLogs =
FileJournalManager.matchEditLogs(files);
for (EditLogFile log : editLogs) {
if (log.getFirstTxId() < minTxIdToKeep &&
log.getLastTxId() < minTxIdToKeep) {
purger.purgeLog(log);
}
@@ -110,5 +126,167 @@ class FileJournalManager implements JournalManager {
File f = NNStorage.getInProgressEditsFile(sd, segmentStartsAtTxId);
return new EditLogFileInputStream(f);
}
/**
* Find all editlog segments starting at or above the given txid.
* @param fromTxId the txnid which to start looking
* @return a list of remote edit logs
* @throws IOException if edit logs cannot be listed.
*/
List<RemoteEditLog> getRemoteEditLogs(long firstTxId) throws IOException {
File currentDir = sd.getCurrentDir();
List<EditLogFile> allLogFiles = matchEditLogs(
FileUtil.listFiles(currentDir));
List<RemoteEditLog> ret = Lists.newArrayListWithCapacity(
allLogFiles.size());
for (EditLogFile elf : allLogFiles) {
if (elf.isCorrupt() || elf.isInProgress()) continue;
if (elf.getFirstTxId() >= firstTxId) {
ret.add(new RemoteEditLog(elf.firstTxId, elf.lastTxId));
} else if ((firstTxId > elf.getFirstTxId()) &&
(firstTxId <= elf.getLastTxId())) {
throw new IOException("Asked for firstTxId " + firstTxId
+ " which is in the middle of file " + elf.file);
}
}
return ret;
}
static List<EditLogFile> matchEditLogs(File[] filesInStorage) {
List<EditLogFile> ret = Lists.newArrayList();
for (File f : filesInStorage) {
String name = f.getName();
// Check for edits
Matcher editsMatch = EDITS_REGEX.matcher(name);
if (editsMatch.matches()) {
try {
long startTxId = Long.valueOf(editsMatch.group(1));
long endTxId = Long.valueOf(editsMatch.group(2));
ret.add(new EditLogFile(f, startTxId, endTxId));
} catch (NumberFormatException nfe) {
LOG.error("Edits file " + f + " has improperly formatted " +
"transaction ID");
// skip
}
}
// Check for in-progress edits
Matcher inProgressEditsMatch = EDITS_INPROGRESS_REGEX.matcher(name);
if (inProgressEditsMatch.matches()) {
try {
long startTxId = Long.valueOf(inProgressEditsMatch.group(1));
ret.add(
new EditLogFile(f, startTxId, EditLogFile.UNKNOWN_END));
} catch (NumberFormatException nfe) {
LOG.error("In-progress edits file " + f + " has improperly " +
"formatted transaction ID");
// skip
}
}
}
return ret;
}
/**
* Record of an edit log that has been located and had its filename parsed.
*/
static class EditLogFile {
private File file;
private final long firstTxId;
private long lastTxId;
private EditLogValidation cachedValidation = null;
private boolean isCorrupt = false;
static final long UNKNOWN_END = -1;
final static Comparator<EditLogFile> COMPARE_BY_START_TXID
= new Comparator<EditLogFile>() {
public int compare(EditLogFile a, EditLogFile b) {
return ComparisonChain.start()
.compare(a.getFirstTxId(), b.getFirstTxId())
.compare(a.getLastTxId(), b.getLastTxId())
.result();
}
};
EditLogFile(File file,
long firstTxId, long lastTxId) {
assert lastTxId == UNKNOWN_END || lastTxId >= firstTxId;
assert firstTxId > 0;
assert file != null;
this.firstTxId = firstTxId;
this.lastTxId = lastTxId;
this.file = file;
}
public void finalizeLog() throws IOException {
long numTransactions = validateLog().numTransactions;
long lastTxId = firstTxId + numTransactions - 1;
File dst = new File(file.getParentFile(),
NNStorage.getFinalizedEditsFileName(firstTxId, lastTxId));
LOG.info("Finalizing edits log " + file + " by renaming to "
+ dst.getName());
if (!file.renameTo(dst)) {
throw new IOException("Couldn't finalize log " +
file + " to " + dst);
}
this.lastTxId = lastTxId;
file = dst;
}
long getFirstTxId() {
return firstTxId;
}
long getLastTxId() {
return lastTxId;
}
EditLogValidation validateLog() throws IOException {
if (cachedValidation == null) {
cachedValidation = EditLogFileInputStream.validateEditLog(file);
}
return cachedValidation;
}
boolean isInProgress() {
return (lastTxId == UNKNOWN_END);
}
File getFile() {
return file;
}
void markCorrupt() {
isCorrupt = true;
}
boolean isCorrupt() {
return isCorrupt;
}
void moveAsideCorruptFile() throws IOException {
assert isCorrupt;
File src = file;
File dst = new File(src.getParent(), src.getName() + ".corrupt");
boolean success = src.renameTo(dst);
if (!success) {
throw new IOException(
"Couldn't rename corrupt log " + src + " to " + dst);
}
file = dst;
}
@Override
public String toString() {
return String.format("EditLogFile(file=%s,first=%019d,last=%019d,"
+"inProgress=%b,corrupt=%b)", file.toString(),
firstTxId, lastTxId, isInProgress(), isCorrupt);
}
}
}
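
The filename parsing that moved into FileJournalManager above is plain regex matching over the storage directory listing. A self-contained sketch of the same idea, hard-coding the edits_N-M and edits_inprogress_N name shapes instead of going through NameNodeFile, is shown below; the sample names are made up.

import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class EditLogNameSketch {
  // Finalized segment: edits_<firstTxId>-<lastTxId>; in-progress: edits_inprogress_<firstTxId>.
  private static final Pattern FINALIZED = Pattern.compile("edits_(\\d+)-(\\d+)");
  private static final Pattern IN_PROGRESS = Pattern.compile("edits_inprogress_(\\d+)");

  public static void main(String[] args) {
    String[] names = {
        "edits_0000000000000000001-0000000000000000042",
        "edits_inprogress_0000000000000000043",
        "fsimage_0000000000000000042"   // ignored: not an edits file
    };
    for (String name : names) {
      Matcher m = FINALIZED.matcher(name);
      if (m.matches()) {
        System.out.println(name + " -> finalized [" + Long.parseLong(m.group(1))
            + ", " + Long.parseLong(m.group(2)) + "]");
        continue;
      }
      m = IN_PROGRESS.matcher(name);
      if (m.matches()) {
        System.out.println(name + " -> in progress from " + Long.parseLong(m.group(1)));
      }
    }
  }
}

Names that match neither pattern (images, checkpoint files, stray files) simply fall through, which mirrors how matchEditLogs skips them.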

View File

@@ -30,6 +30,7 @@ import javax.servlet.http.HttpServletResponse;
import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.security.UserGroupInformation;
/**
@@ -59,13 +60,12 @@ public class FsckServlet extends DfsServlet {
NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
final FSNamesystem namesystem = nn.getNamesystem();
final BlockManager bm = namesystem.getBlockManager();
final int totalDatanodes =
namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
final short minReplication = namesystem.getMinReplication();
namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE);
new NamenodeFsck(conf, nn,
NamenodeJspHelper.getNetworkTopology(nn), pmap, out,
totalDatanodes, minReplication, remoteAddress).fsck();
bm.getDatanodeManager().getNetworkTopology(), pmap, out,
totalDatanodes, bm.minReplication, remoteAddress).fsck();
return null;
}

View File

@@ -55,7 +55,7 @@ interface JournalManager {
* @param purger the purging implementation to use
* @throws IOException if purging fails
*/
void purgeLogsOlderThan(long minTxIdToKeep, StoragePurger purger)
void purgeLogsOlderThan(long minTxIdToKeep)
throws IOException;
/**

View File

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.util.ServletUtil;
import org.apache.hadoop.util.VersionInfo;
import org.znerd.xmlenc.*;
@@ -86,8 +87,7 @@ public class ListPathsServlet extends DfsServlet {
*/
protected Map<String,String> buildRoot(HttpServletRequest request,
XMLOutputter doc) {
final String path = request.getPathInfo() != null
? request.getPathInfo() : "/";
final String path = ServletUtil.getDecodedPath(request, "/listPaths");
final String exclude = request.getParameter("exclude") != null
? request.getParameter("exclude") : "";
final String filter = request.getParameter("filter") != null
@@ -135,6 +135,7 @@ public class ListPathsServlet extends DfsServlet {
final Map<String, String> root = buildRoot(request, doc);
final String path = root.get("path");
final String filePath = ServletUtil.getDecodedPath(request, "/listPaths");
try {
final boolean recur = "yes".equals(root.get("recursive"));
@@ -153,7 +154,7 @@ public class ListPathsServlet extends DfsServlet {
doc.attribute(m.getKey(), m.getValue());
}
HdfsFileStatus base = nn.getFileInfo(path);
HdfsFileStatus base = nn.getFileInfo(filePath);
if ((base != null) && base.isDir()) {
writeInfo(base.getFullPath(new Path(path)), base, doc);
}

View File

@@ -27,8 +27,8 @@ import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundFSImage;
import org.apache.hadoop.hdfs.server.namenode.FSImageStorageInspector.FSImageFile;
import org.apache.hadoop.hdfs.server.namenode.FileJournalManager.EditLogFile;
import org.apache.hadoop.hdfs.util.MD5FileUtils;
import com.google.common.collect.Lists;
@@ -80,14 +80,14 @@ public class NNStorageRetentionManager {
// If fsimage_N is the image we want to keep, then we need to keep
// all txns > N. We can remove anything < N+1, since fsimage_N
// reflects the state up to and including N.
editLog.purgeLogsOlderThan(minImageTxId + 1, purger);
editLog.purgeLogsOlderThan(minImageTxId + 1);
}
private void purgeCheckpointsOlderThan(
FSImageTransactionalStorageInspector inspector,
long minTxId) {
for (FoundFSImage image : inspector.getFoundImages()) {
if (image.getTxId() < minTxId) {
for (FSImageFile image : inspector.getFoundImages()) {
if (image.getCheckpointTxId() < minTxId) {
LOG.info("Purging old image " + image);
purger.purgeImage(image);
}
@@ -101,10 +101,10 @@ public class NNStorageRetentionManager {
*/
private long getImageTxIdToRetain(FSImageTransactionalStorageInspector inspector) {
List<FoundFSImage> images = inspector.getFoundImages();
List<FSImageFile> images = inspector.getFoundImages();
TreeSet<Long> imageTxIds = Sets.newTreeSet();
for (FoundFSImage image : images) {
imageTxIds.add(image.getTxId());
for (FSImageFile image : images) {
imageTxIds.add(image.getCheckpointTxId());
}
List<Long> imageTxIdsList = Lists.newArrayList(imageTxIds);
@@ -124,18 +124,18 @@ public class NNStorageRetentionManager {
* Interface responsible for disposing of old checkpoints and edit logs.
*/
static interface StoragePurger {
void purgeLog(FoundEditLog log);
void purgeImage(FoundFSImage image);
void purgeLog(EditLogFile log);
void purgeImage(FSImageFile image);
}
static class DeletionStoragePurger implements StoragePurger {
@Override
public void purgeLog(FoundEditLog log) {
public void purgeLog(EditLogFile log) {
deleteOrWarn(log.getFile());
}
@Override
public void purgeImage(FoundFSImage image) {
public void purgeImage(FSImageFile image) {
deleteOrWarn(image.getFile());
deleteOrWarn(MD5FileUtils.getDigestFileForFile(image.getFile()));
}
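
The retention rule spelled out in the comment above ("if fsimage_N is the image we want to keep, we can remove anything below N+1") reduces to a simple filter over the discovered segments. A rough stand-alone sketch under that assumption, with a hypothetical Segment pair standing in for EditLogFile and invented txid ranges:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class RetentionSketch {
  // Hypothetical stand-in for an edits segment with its txid range.
  static final class Segment {
    final long firstTxId, lastTxId;
    Segment(long firstTxId, long lastTxId) {
      this.firstTxId = firstTxId;
      this.lastTxId = lastTxId;
    }
    @Override
    public String toString() {
      return "edits_" + firstTxId + "-" + lastTxId;
    }
  }

  // Keep fsimage_N plus every transaction above N; anything that ends
  // below N+1 is no longer needed for recovery and can be purged.
  static List<Segment> purgeable(List<Segment> segments, long keptImageTxId) {
    long minTxIdToKeep = keptImageTxId + 1;
    List<Segment> out = new ArrayList<Segment>();
    for (Segment s : segments) {
      if (s.firstTxId < minTxIdToKeep && s.lastTxId < minTxIdToKeep) {
        out.add(s);
      }
    }
    return out;
  }

  public static void main(String[] args) {
    List<Segment> segs = Arrays.asList(
        new Segment(1, 100), new Segment(101, 200), new Segment(201, 300));
    System.out.println(purgeable(segs, 200));  // -> [edits_1-100, edits_101-200]
  }
}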

View File

@@ -58,6 +58,11 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.DirectoryListing;
import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
import org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_LENGTH;
import static org.apache.hadoop.hdfs.protocol.FSConstants.MAX_PATH_DEPTH;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -146,7 +151,7 @@ import org.apache.hadoop.util.StringUtils;
* NameNode state, for example partial blocksMap etc.
**********************************************************/
@InterfaceAudience.Private
public class NameNode implements NamenodeProtocols, FSConstants {
public class NameNode implements NamenodeProtocols {
static{
HdfsConfiguration.init();
}
@@ -654,7 +659,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
"Unexpected not positive size: "+size);
}
return namesystem.getBlocks(datanode, size);
return namesystem.getBlockManager().getBlocks(datanode, size);
}
@Override // NamenodeProtocol
@@ -750,8 +755,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
+src+" for "+clientName+" at "+clientMachine);
}
if (!checkPathLength(src)) {
throw new IOException("create: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
throw new IOException("create: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.startFile(src,
new PermissionStatus(UserGroupInformation.getCurrentUser().getShortUserName(),
@@ -898,7 +903,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
DatanodeInfo[] nodes = blocks[i].getLocations();
for (int j = 0; j < nodes.length; j++) {
DatanodeInfo dn = nodes[j];
namesystem.markBlockAsCorrupt(blk, dn);
namesystem.getBlockManager().findAndMarkBlockAsCorrupt(blk, dn);
}
}
}
@@ -944,8 +949,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
}
if (!checkPathLength(dst)) {
throw new IOException("rename: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
throw new IOException("rename: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
boolean ret = namesystem.renameTo(src, dst);
if (ret) {
@@ -968,8 +973,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
}
if (!checkPathLength(dst)) {
throw new IOException("rename: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
throw new IOException("rename: Pathname too long. Limit "
+ MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
}
namesystem.renameTo(src, dst, options);
metrics.incrFilesRenamed();
@@ -1100,7 +1105,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
@Override // ClientProtocol
public void refreshNodes() throws IOException {
// TODO:HA decide on OperationCategory for this
namesystem.refreshNodes(new HdfsConfiguration());
namesystem.getBlockManager().getDatanodeManager().refreshNodes(
new HdfsConfiguration());
}
@Override // NamenodeProtocol
@@ -1119,7 +1125,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
public RemoteEditLogManifest getEditLogManifest(long sinceTxId)
throws IOException {
// TODO:HA decide on OperationCategory for this
return namesystem.getEditLogManifest(sinceTxId);
return namesystem.getEditLog().getEditLogManifest(sinceTxId);
}
@Override // ClientProtocol
@@ -1167,7 +1173,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
@Override // ClientProtocol
public void setBalancerBandwidth(long bandwidth) throws IOException {
// TODO:HA decide on OperationCategory for this
namesystem.setBalancerBandwidth(bandwidth);
namesystem.getBlockManager().getDatanodeManager().setBalancerBandwidth(bandwidth);
}
@Override // ClientProtocol
@@ -1271,7 +1277,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
+ " blocks");
}
namesystem.processReport(nodeReg, poolId, blist);
namesystem.getBlockManager().processReport(nodeReg, poolId, blist);
if (getFSImage().isUpgradeFinalized())
return new DatanodeCommand.Finalize(poolId);
return null;
@@ -1286,7 +1292,8 @@ public class NameNode implements NamenodeProtocols, FSConstants {
+"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
}
for (int i = 0; i < blocks.length; i++) {
namesystem.blockReceived(nodeReg, poolId, blocks[i], delHints[i]);
namesystem.getBlockManager().blockReceived(
nodeReg, poolId, blocks[i], delHints[i]);
}
}
@@ -1305,7 +1312,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
LOG.warn("Disk error on " + dnName + ": " + msg);
} else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
LOG.warn("Fatal disk error on " + dnName + ": " + msg);
namesystem.removeDatanode(nodeReg);
namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);
} else {
LOG.info("Error report from " + dnName + ": " + msg);
}
@@ -1347,7 +1354,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
* @throws IOException
*/
public void verifyVersion(int version) throws IOException {
if (version != LAYOUT_VERSION)
if (version != FSConstants.LAYOUT_VERSION)
throw new IncorrectVersionException(version, "data node");
}

View File

@@ -646,46 +646,77 @@ public class NamenodeFsck {
/** {@inheritDoc} */
public String toString() {
StringBuilder res = new StringBuilder();
res.append("Status: " + (isHealthy() ? "HEALTHY" : "CORRUPT"));
res.append("\n Total size:\t" + totalSize + " B");
if (totalOpenFilesSize != 0)
res.append(" (Total open files size: " + totalOpenFilesSize + " B)");
res.append("\n Total dirs:\t" + totalDirs);
res.append("\n Total files:\t" + totalFiles);
if (totalOpenFiles != 0)
res.append(" (Files currently being written: " +
totalOpenFiles + ")");
res.append("\n Total blocks (validated):\t" + totalBlocks);
if (totalBlocks > 0) res.append(" (avg. block size "
+ (totalSize / totalBlocks) + " B)");
if (totalOpenFilesBlocks != 0)
res.append(" (Total open file blocks (not validated): " +
totalOpenFilesBlocks + ")");
if (corruptFiles > 0) {
res.append("\n ********************************");
res.append("\n CORRUPT FILES:\t" + corruptFiles);
res.append("Status: ").append((isHealthy() ? "HEALTHY" : "CORRUPT"))
.append("\n Total size:\t").append(totalSize).append(" B");
if (totalOpenFilesSize != 0) {
res.append(" (Total open files size: ").append(totalOpenFilesSize)
.append(" B)");
}
res.append("\n Total dirs:\t").append(totalDirs).append(
"\n Total files:\t").append(totalFiles);
if (totalOpenFiles != 0) {
res.append(" (Files currently being written: ").append(totalOpenFiles)
.append(")");
}
res.append("\n Total blocks (validated):\t").append(totalBlocks);
if (totalBlocks > 0) {
res.append(" (avg. block size ").append((totalSize / totalBlocks))
.append(" B)");
}
if (totalOpenFilesBlocks != 0) {
res.append(" (Total open file blocks (not validated): ").append(
totalOpenFilesBlocks).append(")");
}
if (corruptFiles > 0) {
res.append("\n ********************************").append(
"\n CORRUPT FILES:\t").append(corruptFiles);
if (missingSize > 0) {
res.append("\n MISSING BLOCKS:\t" + missingIds.size());
res.append("\n MISSING SIZE:\t\t" + missingSize + " B");
res.append("\n MISSING BLOCKS:\t").append(missingIds.size()).append(
"\n MISSING SIZE:\t\t").append(missingSize).append(" B");
}
if (corruptBlocks > 0) {
res.append("\n CORRUPT BLOCKS: \t" + corruptBlocks);
res.append("\n CORRUPT BLOCKS: \t").append(corruptBlocks);
}
res.append("\n ********************************");
}
res.append("\n Minimally replicated blocks:\t" + numMinReplicatedBlocks);
if (totalBlocks > 0) res.append(" (" + ((float) (numMinReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
res.append("\n Over-replicated blocks:\t" + numOverReplicatedBlocks);
if (totalBlocks > 0) res.append(" (" + ((float) (numOverReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
res.append("\n Under-replicated blocks:\t" + numUnderReplicatedBlocks);
if (totalBlocks > 0) res.append(" (" + ((float) (numUnderReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
res.append("\n Mis-replicated blocks:\t\t" + numMisReplicatedBlocks);
if (totalBlocks > 0) res.append(" (" + ((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks) + " %)");
res.append("\n Default replication factor:\t" + replication);
res.append("\n Average block replication:\t" + getReplicationFactor());
res.append("\n Corrupt blocks:\t\t" + corruptBlocks);
res.append("\n Missing replicas:\t\t" + missingReplicas);
if (totalReplicas > 0) res.append(" (" + ((float) (missingReplicas * 100) / (float) totalReplicas) + " %)");
res.append("\n Minimally replicated blocks:\t").append(
numMinReplicatedBlocks);
if (totalBlocks > 0) {
res.append(" (").append(
((float) (numMinReplicatedBlocks * 100) / (float) totalBlocks))
.append(" %)");
}
res.append("\n Over-replicated blocks:\t")
.append(numOverReplicatedBlocks);
if (totalBlocks > 0) {
res.append(" (").append(
((float) (numOverReplicatedBlocks * 100) / (float) totalBlocks))
.append(" %)");
}
res.append("\n Under-replicated blocks:\t").append(
numUnderReplicatedBlocks);
if (totalBlocks > 0) {
res.append(" (").append(
((float) (numUnderReplicatedBlocks * 100) / (float) totalBlocks))
.append(" %)");
}
res.append("\n Mis-replicated blocks:\t\t")
.append(numMisReplicatedBlocks);
if (totalBlocks > 0) {
res.append(" (").append(
((float) (numMisReplicatedBlocks * 100) / (float) totalBlocks))
.append(" %)");
}
res.append("\n Default replication factor:\t").append(replication)
.append("\n Average block replication:\t").append(
getReplicationFactor()).append("\n Corrupt blocks:\t\t").append(
corruptBlocks).append("\n Missing replicas:\t\t").append(
missingReplicas);
if (totalReplicas > 0) {
res.append(" (").append(
((float) (missingReplicas * 100) / (float) totalReplicas)).append(
" %)");
}
return res.toString();
}
}
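
The toString() rewrite above changes only how the fsck report string is assembled: res.append("label" + value) builds a throwaway String for every line, while chained appends write straight into the builder's buffer. A minimal before/after illustration (the variable names are just examples):

public class AppendSketch {
  public static void main(String[] args) {
    long totalSize = 1024;
    StringBuilder res = new StringBuilder();
    // Before: '+' builds an intermediate String which is then copied into res.
    res.append("\n Total size:\t" + totalSize + " B");
    // After: each value is appended directly into the builder's buffer.
    res.append("\n Total size:\t").append(totalSize).append(" B");
    System.out.println(res);
  }
}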

View File

@@ -42,13 +42,14 @@ import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
import org.apache.hadoop.hdfs.server.common.JspHelper;
import org.apache.hadoop.hdfs.server.common.Storage;
import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.net.NetworkTopology;
import org.apache.hadoop.net.NodeBase;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.Token;
@@ -229,14 +230,10 @@ class NamenodeJspHelper {
void generateHealthReport(JspWriter out, NameNode nn,
HttpServletRequest request) throws IOException {
FSNamesystem fsn = nn.getNamesystem();
ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
fsn.DFSNodesStatus(live, dead);
// If a data node has been first included in the include list,
// then decommissioned, then removed from both include and exclude list.
// We make the web console to "forget" this node by not displaying it.
fsn.removeDecomNodeFromList(live);
fsn.removeDecomNodeFromList(dead);
final DatanodeManager dm = fsn.getBlockManager().getDatanodeManager();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, dead, true);
int liveDecommissioned = 0;
for (DatanodeDescriptor d : live) {
@@ -248,8 +245,7 @@ class NamenodeJspHelper {
deadDecommissioned += d.isDecommissioned() ? 1 : 0;
}
ArrayList<DatanodeDescriptor> decommissioning = fsn
.getDecommissioningNodes();
final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
sorterField = request.getParameter("sorter/field");
sorterOrder = request.getParameter("sorter/order");
@@ -349,7 +345,7 @@ class NamenodeJspHelper {
+ colTxt() + ":" + colTxt() + decommissioning.size()
+ rowTxt() + colTxt("Excludes missing blocks.")
+ "Number of Under-Replicated Blocks" + colTxt() + ":" + colTxt()
+ fsn.getUnderReplicatedNotMissingBlocks()
+ fsn.getBlockManager().getUnderReplicatedNotMissingBlocks()
+ "</table></div><br>\n");
if (live.isEmpty() && dead.isEmpty()) {
@@ -370,15 +366,10 @@ class NamenodeJspHelper {
return token == null ? null : token.encodeToUrlString();
}
/** @return the network topology. */
static NetworkTopology getNetworkTopology(final NameNode namenode) {
return namenode.getNamesystem().getBlockManager().getDatanodeManager(
).getNetworkTopology();
}
/** @return a randomly chosen datanode. */
static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
return (DatanodeDescriptor)getNetworkTopology(namenode).chooseRandom(
return (DatanodeDescriptor)namenode.getNamesystem().getBlockManager(
).getDatanodeManager().getNetworkTopology().chooseRandom(
NodeBase.ROOT);
}
@@ -564,12 +555,14 @@ class NamenodeJspHelper {
void generateNodesList(ServletContext context, JspWriter out,
HttpServletRequest request) throws IOException {
ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
final NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
nn.getNamesystem().DFSNodesStatus(live, dead);
nn.getNamesystem().removeDecomNodeFromList(live);
nn.getNamesystem().removeDecomNodeFromList(dead);
final FSNamesystem ns = nn.getNamesystem();
final DatanodeManager dm = ns.getBlockManager().getDatanodeManager();
final List<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
final List<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
dm.fetchDatanodes(live, dead, true);
InetSocketAddress nnSocketAddress = (InetSocketAddress) context
.getAttribute(NameNodeHttpServer.NAMENODE_ADDRESS_ATTRIBUTE_KEY);
String nnaddr = nnSocketAddress.getAddress().getHostAddress() + ":"
@@ -678,8 +671,7 @@ class NamenodeJspHelper {
}
} else if (whatNodes.equals("DECOMMISSIONING")) {
// Decommissioning Nodes
ArrayList<DatanodeDescriptor> decommissioning = nn.getNamesystem()
.getDecommissioningNodes();
final List<DatanodeDescriptor> decommissioning = dm.getDecommissioningNodes();
out.print("<br> <a name=\"DecommissioningNodes\" id=\"title\"> "
+ " Decommissioning Datanodes : " + decommissioning.size()
+ "</a><br><br>\n");
@@ -715,16 +707,17 @@ class NamenodeJspHelper {
static class XMLBlockInfo {
final Block block;
final INodeFile inode;
final FSNamesystem fsn;
final BlockManager blockManager;
public XMLBlockInfo(FSNamesystem fsn, Long blockId) {
this.fsn = fsn;
XMLBlockInfo(FSNamesystem fsn, Long blockId) {
this.blockManager = fsn.getBlockManager();
if (blockId == null) {
this.block = null;
this.inode = null;
} else {
this.block = new Block(blockId);
this.inode = fsn.getBlockManager().getINode(block);
this.inode = blockManager.getINode(block);
}
}
@@ -798,31 +791,25 @@ class NamenodeJspHelper {
}
doc.startTag("replicas");
if (fsn.getBlockManager().blocksMap.contains(block)) {
Iterator<DatanodeDescriptor> it =
fsn.getBlockManager().blocksMap.nodeIterator(block);
for(final Iterator<DatanodeDescriptor> it = blockManager.datanodeIterator(block);
it.hasNext(); ) {
doc.startTag("replica");
while (it.hasNext()) {
doc.startTag("replica");
DatanodeDescriptor dd = it.next();
DatanodeDescriptor dd = it.next();
doc.startTag("host_name");
doc.pcdata(dd.getHostName());
doc.endTag();
doc.startTag("host_name");
doc.pcdata(dd.getHostName());
doc.endTag();
boolean isCorrupt = fsn.getCorruptReplicaBlockIds(0,
block.getBlockId()) != null;
doc.startTag("is_corrupt");
doc.pcdata(""+isCorrupt);
doc.endTag();
doc.endTag(); // </replica>
}
}
boolean isCorrupt = blockManager.getCorruptReplicaBlockIds(0,
block.getBlockId()) != null;
doc.startTag("is_corrupt");
doc.pcdata(""+isCorrupt);
doc.endTag();
doc.endTag(); // </replica>
}
doc.endTag(); // </replicas>
}
@@ -834,14 +821,14 @@ class NamenodeJspHelper {
// utility class used in corrupt_replicas_xml.jsp
static class XMLCorruptBlockInfo {
final FSNamesystem fsn;
final Configuration conf;
final Long startingBlockId;
final int numCorruptBlocks;
final BlockManager blockManager;
public XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
XMLCorruptBlockInfo(FSNamesystem fsn, Configuration conf,
int numCorruptBlocks, Long startingBlockId) {
this.fsn = fsn;
this.blockManager = fsn.getBlockManager();
this.conf = conf;
this.numCorruptBlocks = numCorruptBlocks;
this.startingBlockId = startingBlockId;
@ -864,17 +851,16 @@ class NamenodeJspHelper {
doc.endTag();
doc.startTag("num_missing_blocks");
doc.pcdata(""+fsn.getMissingBlocksCount());
doc.pcdata(""+blockManager.getMissingBlocksCount());
doc.endTag();
doc.startTag("num_corrupt_replica_blocks");
doc.pcdata(""+fsn.getCorruptReplicaBlocks());
doc.pcdata(""+blockManager.getCorruptReplicaBlocksCount());
doc.endTag();
doc.startTag("corrupt_replica_block_ids");
long[] corruptBlockIds
= fsn.getCorruptReplicaBlockIds(numCorruptBlocks,
startingBlockId);
final long[] corruptBlockIds = blockManager.getCorruptReplicaBlockIds(
numCorruptBlocks, startingBlockId);
if (corruptBlockIds != null) {
for (Long blockId: corruptBlockIds) {
doc.startTag("block_id");

View File

@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.ServletUtil;
import org.mortbay.jetty.InclusiveByteRange;
@InterfaceAudience.Private
@ -57,13 +58,14 @@ public class StreamFile extends DfsServlet {
final DataNode datanode = (DataNode) context.getAttribute("datanode");
return DatanodeJspHelper.getDFSClient(request, datanode, conf, ugi);
}
@SuppressWarnings("unchecked")
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
final String path = request.getPathInfo() != null ?
request.getPathInfo() : "/";
final String path = ServletUtil.getDecodedPath(request, "/streamFile");
final String rawPath = ServletUtil.getRawPath(request, "/streamFile");
final String filename = JspHelper.validatePath(path);
final String rawFilename = JspHelper.validatePath(rawPath);
if (filename == null) {
response.setContentType("text/plain");
PrintWriter out = response.getWriter();
@ -98,7 +100,7 @@ public class StreamFile extends DfsServlet {
} else {
// No ranges, so send entire file
response.setHeader("Content-Disposition", "attachment; filename=\"" +
filename + "\"");
rawFilename + "\"");
response.setContentType("application/octet-stream");
response.setHeader(CONTENT_LENGTH, "" + fileLen);
StreamFile.copyFromOffset(in, out, 0L, fileLen);

View File

@ -41,7 +41,7 @@ import com.google.common.collect.Lists;
/**
* This class provides fetching a specified file from the NameNode.
*/
class TransferFsImage implements FSConstants {
class TransferFsImage {
public final static String CONTENT_LENGTH = "Content-Length";
public final static String MD5_HEADER = "X-MD5-Digest";
@ -69,6 +69,8 @@ class TransferFsImage implements FSConstants {
static void downloadEditsToStorage(String fsName, RemoteEditLog log,
NNStorage dstStorage) throws IOException {
assert log.getStartTxId() > 0 && log.getEndTxId() > 0 :
"bad log: " + log;
String fileid = GetImageServlet.getParamStringForLog(
log, dstStorage);
String fileName = NNStorage.getFinalizedEditsFileName(
@ -122,7 +124,7 @@ class TransferFsImage implements FSConstants {
static void getFileServer(OutputStream outstream, File localfile,
DataTransferThrottler throttler)
throws IOException {
byte buf[] = new byte[BUFFER_SIZE];
byte buf[] = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
FileInputStream infile = null;
try {
infile = new FileInputStream(localfile);
@ -137,7 +139,7 @@ class TransferFsImage implements FSConstants {
&& localfile.getAbsolutePath().contains("fsimage")) {
// Test sending image shorter than localfile
long len = localfile.length();
buf = new byte[(int)Math.min(len/2, BUFFER_SIZE)];
buf = new byte[(int)Math.min(len/2, FSConstants.IO_FILE_BUFFER_SIZE)];
// This will read at most half of the image
// and the rest of the image will be sent over the wire
infile.read(buf);
@ -177,7 +179,7 @@ class TransferFsImage implements FSConstants {
static MD5Hash getFileClient(String nnHostPort,
String queryString, List<File> localPaths,
NNStorage dstStorage, boolean getChecksum) throws IOException {
byte[] buf = new byte[BUFFER_SIZE];
byte[] buf = new byte[FSConstants.IO_FILE_BUFFER_SIZE];
String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
StringBuilder str = new StringBuilder(proto+nnHostPort+"/getimage?");
str.append(queryString);
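
For context on the buffer-size change above: getFileServer is essentially a buffered copy loop from a local fsimage/edits file to the HTTP response stream, and after this patch the buffer is sized from the shared FSConstants.IO_FILE_BUFFER_SIZE constant rather than a class-local BUFFER_SIZE. A minimal sketch of that copy pattern follows; the class name, the 4096 fallback value, and the omission of throttling and digest handling are illustrative assumptions, not part of the patch.

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.OutputStream;

// Illustrative sketch only: stream a local file to an output stream using a
// fixed-size buffer, the same shape as TransferFsImage.getFileServer.
class BufferedFileServerSketch {
  // Stand-in for FSConstants.IO_FILE_BUFFER_SIZE (assumed value for the sketch).
  private static final int IO_FILE_BUFFER_SIZE = 4096;

  static void serveFile(OutputStream out, File localFile) throws IOException {
    byte[] buf = new byte[IO_FILE_BUFFER_SIZE];
    FileInputStream in = null;
    try {
      in = new FileInputStream(localFile);
      int n;
      while ((n = in.read(buf)) > 0) {
        out.write(buf, 0, n); // throttling and MD5 digesting omitted here
      }
    } finally {
      if (in != null) {
        in.close();
      }
    }
  }
}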

View File

@ -20,11 +20,15 @@ package org.apache.hadoop.hdfs.server.protocol;
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.Comparator;
import org.apache.hadoop.hdfs.protocol.FSConstants;
import org.apache.hadoop.io.Writable;
public class RemoteEditLog implements Writable {
import com.google.common.base.Function;
import com.google.common.collect.ComparisonChain;
public class RemoteEditLog implements Writable, Comparable<RemoteEditLog> {
private long startTxId = FSConstants.INVALID_TXID;
private long endTxId = FSConstants.INVALID_TXID;
@ -60,5 +64,34 @@ public class RemoteEditLog implements Writable {
startTxId = in.readLong();
endTxId = in.readLong();
}
@Override
public int compareTo(RemoteEditLog log) {
return ComparisonChain.start()
.compare(startTxId, log.startTxId)
.compare(endTxId, log.endTxId)
.result();
}
@Override
public boolean equals(Object o) {
if (!(o instanceof RemoteEditLog)) return false;
return this.compareTo((RemoteEditLog)o) == 0;
}
@Override
public int hashCode() {
return (int) (startTxId * endTxId);
}
/**
* Guava <code>Function</code> which applies {@link #getStartTxId()}
*/
public static final Function<RemoteEditLog, Long> GET_START_TXID =
new Function<RemoteEditLog, Long>() {
@Override
public Long apply(RemoteEditLog log) {
return log.getStartTxId();
}
};
}
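
The new Comparable implementation and the GET_START_TXID function are small utilities for ordering edit-log segments and projecting their start transaction ids with Guava. A hedged usage sketch follows; the two-argument RemoteEditLog(startTxId, endTxId) constructor used here is assumed and is not shown in this hunk.

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

import com.google.common.collect.Lists;

import org.apache.hadoop.hdfs.server.protocol.RemoteEditLog;

// Illustrative only: sort segments and project their start txids.
public class RemoteEditLogSketch {
  public static void main(String[] args) {
    // Assumed convenience constructor RemoteEditLog(startTxId, endTxId).
    List<RemoteEditLog> logs = Arrays.asList(
        new RemoteEditLog(101, 200),
        new RemoteEditLog(1, 100));

    // compareTo orders by startTxId first, then by endTxId.
    Collections.sort(logs);

    // GET_START_TXID projects each segment to its starting transaction id.
    List<Long> startTxIds = Lists.transform(logs, RemoteEditLog.GET_START_TXID);
    System.out.println(startTxIds); // [1, 101]
  }
}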

View File

@ -15,7 +15,7 @@
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.util;
package org.apache.hadoop.hdfs.util;
import java.util.Iterator;
import java.util.Map;
@ -110,4 +110,4 @@ public class CyclicIteration<K, V> implements Iterable<Map.Entry<K, V>> {
throw new UnsupportedOperationException("Not supported");
}
}
}
}

View File

@ -0,0 +1,36 @@
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.hdfs.util;
/**
* A Holder is simply a wrapper around some other object. This is useful
* in particular for storing immutable values like boxed Integers in a
* collection without having to do the &quot;lookup&quot; of the value twice.
*/
public class Holder<T> {
public T held;
public Holder(T held) {
this.held = held;
}
@Override
public String toString() {
return String.valueOf(held);
}
}
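
A minimal usage sketch of the new Holder class; the counting scenario and class name below are hypothetical, chosen only to show why mutating the held value avoids a second map lookup.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.hdfs.util.Holder;

// Hypothetical example: count opcodes with a mutable Holder so each
// increment needs only one map lookup instead of a get followed by a put.
public class HolderSketch {
  public static void main(String[] args) {
    Map<String, Holder<Integer>> counts = new HashMap<String, Holder<Integer>>();
    for (String op : new String[] {"OP_ADD", "OP_DELETE", "OP_ADD"}) {
      Holder<Integer> h = counts.get(op);
      if (h == null) {
        counts.put(op, new Holder<Integer>(1));
      } else {
        h.held++; // mutate in place; no second lookup or re-insert needed
      }
    }
    System.out.println(counts); // e.g. {OP_DELETE=1, OP_ADD=2}
  }
}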

Some files were not shown because too many files have changed in this diff.