Merge branch 'branch-1' of https://git-wip-us.apache.org/repos/asf/hbase into branch-1
commit 254af5a321

@@ -0,0 +1,82 @@
#!/usr/bin/python
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
# script to find hanging test from Jenkins build output
# usage: ./findHangingTests.py <url of Jenkins build console>
#
import urllib2
import sys
import string
if len(sys.argv) != 2 :
  print "ERROR : Provide the jenkins job console URL as the only argument."
  exit(1)
print "Fetching " + sys.argv[1]
response = urllib2.urlopen(sys.argv[1])
i = 0;
tests = {}
failed_tests = {}
summary = 0
host = False
patch = False
branch = False
while True:
  n = response.readline()
  if n == "" :
    break
  if not host and n.find("Building remotely on") >= 0:
    host = True
    print n.strip()
    continue
  if not patch and n.find("Testing patch for ") >= 0:
    patch = True
    print n.strip()
    continue
  if not branch and n.find("Testing patch on branch ") >= 0:
    branch = True
    print n.strip()
    continue
  if n.find("PATCH APPLICATION FAILED") >= 0:
    print "PATCH APPLICATION FAILED"
    sys.exit(1)
  if summary == 0 and n.find("Running tests.") >= 0:
    summary = summary + 1
    continue
  if summary == 1 and n.find("[INFO] Reactor Summary:") >= 0:
    summary = summary + 1
    continue
  if summary == 2 and n.find("[INFO] Apache HBase ") >= 0:
    sys.stdout.write(n)
    continue
  if n.find("org.apache.hadoop.hbase") < 0:
    continue
  test_name = string.strip(n[n.find("org.apache.hadoop.hbase"):len(n)])
  if n.find("Running org.apache.hadoop.hbase") > -1 :
    tests[test_name] = False
  if n.find("Tests run:") > -1 :
    if n.find("FAILURE") > -1 or n.find("ERROR") > -1:
      failed_tests[test_name] = True
    tests[test_name] = True
response.close()

print "Printing hanging tests"
for key, value in tests.iteritems():
  if value == False:
    print "Hanging test : " + key
print "Printing Failing tests"
for key, value in failed_tests.iteritems():
  print "Failing test : " + key
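For reference, a possible invocation of the new script looks like the following; the job URL and the test class names are made up for illustration, not taken from this build:

  $ ./dev-support/findHangingTests.py https://builds.apache.org/job/HBase-TRUNK/1234/consoleText
  Fetching https://builds.apache.org/job/HBase-TRUNK/1234/consoleText
  Printing hanging tests
  Hanging test : org.apache.hadoop.hbase.client.TestSomeFeature
  Printing Failing tests
  Failing test : org.apache.hadoop.hbase.regionserver.TestOtherFeature

A test counts as hanging when its "Running org.apache.hadoop.hbase..." line appears in the console but no matching "Tests run:" line ever follows.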
@@ -30,7 +30,7 @@ export CLOVER_HOME=/home/jenkins/tools/clover/latest
export MAVEN_HOME=/home/jenkins/tools/maven/latest

export PATH=$PATH:$JAVA_HOME/bin:$ANT_HOME/bin:
export MAVEN_OPTS="-Xmx3100M -XX:-UsePerfData"
export MAVEN_OPTS="${MAVEN_OPTS:-"-Xmx3100M -XX:-UsePerfData -XX:MaxPermSize=256m"}"

ulimit -n

@@ -20,7 +20,7 @@
#set -x

### Setup some variables.
### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
### GIT_COMMIT and BUILD_URL are set by Hudson if it is run by patch process
### Read variables from properties file
bindir=$(dirname $0)

@@ -31,15 +31,20 @@ else
  MVN=$MAVEN_HOME/bin/mvn
fi

NEWLINE=$'\n'

PROJECT_NAME=HBase
JENKINS=false
MOVE_PATCH_DIR=true
PATCH_DIR=/tmp
BASEDIR=$(pwd)
BRANCH_NAME="master"

. $BASEDIR/dev-support/test-patch.properties

PS=${PS:-ps}
AWK=${AWK:-awk}
WGET=${WGET:-wget}
SVN=${SVN:-svn}
GREP=${GREP:-grep}
EGREP=${EGREP:-egrep}
PATCH=${PATCH:-patch}

@@ -47,6 +52,7 @@ JIRACLI=${JIRA:-jira}
FINDBUGS_HOME=${FINDBUGS_HOME}
FORREST_HOME=${FORREST_HOME}
ECLIPSE_HOME=${ECLIPSE_HOME}
GIT=${GIT:-git}

###############################################################################
printUsage() {

@@ -62,12 +68,12 @@ printUsage() {
  echo "--mvn-cmd=<cmd> The 'mvn' command to use (default \$MAVEN_HOME/bin/mvn, or 'mvn')"
  echo "--ps-cmd=<cmd> The 'ps' command to use (default 'ps')"
  echo "--awk-cmd=<cmd> The 'awk' command to use (default 'awk')"
  echo "--svn-cmd=<cmd> The 'svn' command to use (default 'svn')"
  echo "--grep-cmd=<cmd> The 'grep' command to use (default 'grep')"
  echo "--patch-cmd=<cmd> The 'patch' command to use (default 'patch')"
  echo "--findbugs-home=<path> Findbugs home directory (default FINDBUGS_HOME environment variable)"
  echo "--forrest-home=<path> Forrest home directory (default FORREST_HOME environment variable)"
  echo "--dirty-workspace Allow the local SVN workspace to have uncommitted changes"
  echo "--dirty-workspace Allow the local workspace to have uncommitted changes"
  echo "--git-cmd=<cmd> The 'git' command to use (default 'git')"
  echo
  echo "Jenkins-only options:"
  echo "--jenkins Run by Jenkins (runs tests and posts results to JIRA)"

@@ -85,6 +91,9 @@ parseArgs() {
    --jenkins)
      JENKINS=true
      ;;
    --no-move-patch-dir)
      MOVE_PATCH_DIR=false
      ;;
    --patch-dir=*)
      PATCH_DIR=${i#*=}
      ;;

@@ -103,9 +112,6 @@ parseArgs() {
    --wget-cmd=*)
      WGET=${i#*=}
      ;;
    --svn-cmd=*)
      SVN=${i#*=}
      ;;
    --grep-cmd=*)
      GREP=${i#*=}
      ;;

@@ -130,6 +136,9 @@ parseArgs() {
    --dirty-workspace)
      DIRTY_WORKSPACE=true
      ;;
    --git-cmd=*)
      GIT=${i#*=}
      ;;
    *)
      PATCH_OR_DEFECT=$i
      ;;

@@ -180,23 +189,92 @@ checkout () {
  echo ""
  ### When run by a developer, if the workspace contains modifications, do not continue
  ### unless the --dirty-workspace option was set
  status=`$SVN stat --ignore-externals | sed -e '/^X[ ]*/D'`
  if [[ $JENKINS == "false" ]] ; then
    if [[ "$status" != "" && -z $DIRTY_WORKSPACE ]] ; then
      echo "ERROR: can't run in a workspace that contains the following modifications"
      echo "$status"
      cleanupAndExit 1
    if [[ -z $DIRTY_WORKSPACE ]] ; then
      # Ref http://stackoverflow.com/a/2659808 for details on checking dirty status
      ${GIT} diff-index --quiet HEAD
      if [[ $? -ne 0 ]] ; then
        uncommitted=`${GIT} diff --name-only HEAD`
        uncommitted="You have the following files with uncommitted changes:${NEWLINE}${uncommitted}"
      fi
      untracked="$(${GIT} ls-files --exclude-standard --others)" && test -z "${untracked}"
      if [[ $? -ne 0 ]] ; then
        untracked="You have untracked and unignored files:${NEWLINE}${untracked}"
      fi
      if [[ $uncommitted || $untracked ]] ; then
        echo "ERROR: can't run in a workspace that contains modifications."
        echo "Pass the '--dirty-workspace' flag to bypass."
        echo ""
        echo "${uncommitted}"
        echo ""
        echo "${untracked}"
        cleanupAndExit 1
      fi
    fi
    echo
  else
    cd $BASEDIR
    $SVN revert -R .
    rm -rf `$SVN status --no-ignore`
    $SVN update
  fi
  return $?
}

findBranchNameFromPatchName() {
  local patchName=$1
  for LOCAL_BRANCH_NAME in $BRANCH_NAMES; do
    if [[ $patchName =~ /jira/secure/attachment/[0-9]*/.*$LOCAL_BRANCH_NAME ]]; then
      BRANCH_NAME=$LOCAL_BRANCH_NAME
      break
    fi
  done
  return 0
}

checkoutBranch() {
  echo ""
  echo ""
  echo "======================================================================"
  echo "======================================================================"
  echo " Testing patch on branch ${BRANCH_NAME}."
  echo "======================================================================"
  echo "======================================================================"
  echo ""
  echo ""
  if [[ $JENKINS == "true" ]] ; then
    if [[ "$BRANCH_NAME" != "master" ]]; then
      echo "origin/${BRANCH_NAME} HEAD is commit `${GIT} rev-list origin/${BRANCH_NAME} -1`"
      echo "${GIT} checkout -f `${GIT} rev-list origin/${BRANCH_NAME} -1`"
      ${GIT} checkout -f `${GIT} rev-list origin/${BRANCH_NAME} -1`
      echo "${GIT} status"
      ${GIT} status
    fi
  fi
}

###############################################################################
### Collect findbugs reports
collectFindbugsReports() {
  name=$1
  basedir=$2
  patch_dir=$3
  for file in $(find $basedir -name findbugsXml.xml)
  do
    relative_file=${file#$basedir/} # strip leading $basedir prefix
    if [ ! $relative_file == "target/findbugsXml.xml" ]; then
      module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path
      module_suffix=`basename ${module_suffix}`
    fi

    cp $file $patch_dir/${name}FindbugsWarnings${module_suffix}.xml
    $FINDBUGS_HOME/bin/setBugDatabaseInfo -name $name \
      $patch_dir/${name}FindbugsWarnings${module_suffix}.xml \
      $patch_dir/${name}FindbugsWarnings${module_suffix}.xml
  done
  xml_file=$patch_dir/${name}FindbugsWarnings.xml
  html_file=$patch_dir/${name}FindbugsWarnings.html
  $FINDBUGS_HOME/bin/unionBugs -withMessages \
    -output $xml_file $patch_dir/${name}FindbugsWarnings*.xml
  $FINDBUGS_HOME/bin/convertXmlToText -html $xml_file $html_file
  file $xml_file $html_file
}

###############################################################################
setup () {
  ### Download latest patch file (ignoring .htm and .html) when run from patch process

@@ -219,10 +297,12 @@ setup () {
  echo "$defect patch is being downloaded at `date` from"
  echo "$patchURL"
  $WGET -q -O $PATCH_DIR/patch $patchURL
  VERSION=${SVN_REVISION}_${defect}_PATCH-${patchNum}
  VERSION=${GIT_COMMIT}_${defect}_PATCH-${patchNum}
  findBranchNameFromPatchName ${relativePatchURL}
  checkoutBranch
  JIRA_COMMENT="Here are the results of testing the latest attachment
  $patchURL
  against trunk revision ${SVN_REVISION}.
  against ${BRANCH_NAME} branch at commit ${GIT_COMMIT}.
  ATTACHMENT ID: ${ATTACHMENT_ID}"

  ### Copy the patch file to $PATCH_DIR

@@ -236,11 +316,9 @@ setup () {
    cleanupAndExit 0
  fi
fi
. $BASEDIR/dev-support/test-patch.properties
### exit if warnings are NOT defined in the properties file
if [ -z "$OK_FINDBUGS_WARNINGS" ] || [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]] ; then
if [[ -z "$OK_JAVADOC_WARNINGS" ]] || [[ -z $OK_RELEASEAUDIT_WARNINGS ]] ; then
  echo "Please define the following properties in test-patch.properties file"
  echo "OK_FINDBUGS_WARNINGS"
  echo "OK_RELEASEAUDIT_WARNINGS"
  echo "OK_JAVADOC_WARNINGS"
  cleanupAndExit 1

@@ -249,22 +327,28 @@ setup () {
  echo ""
  echo "======================================================================"
  echo "======================================================================"
  echo " Pre-build trunk to verify trunk stability and javac warnings"
  echo " Pre-build master to verify stability and javac warnings"
  echo "======================================================================"
  echo "======================================================================"
  echo ""
  echo ""
  echo "$MVN clean package checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
  echo "$MVN clean package checkstyle:checkstyle-aggregate findbugs:findbugs -DskipTests \
    -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1"
  export MAVEN_OPTS="${MAVEN_OPTS}"
  # build core and tests
  $MVN clean package checkstyle:checkstyle-aggregate -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
  $MVN clean package checkstyle:checkstyle-aggregate findbugs:findbugs -DskipTests \
    -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/trunkJavacWarnings.txt 2>&1
  if [[ $? != 0 ]] ; then
    echo "mvn exit code was $?"
    ERR=`$GREP -A 5 'Compilation failure' $PATCH_DIR/trunkJavacWarnings.txt`
    echo "Trunk compilation is broken?
    {code}$ERR{code}"
    cleanupAndExit 1
    if [[ ${#ERR} -ge 1 ]] ; then
      echo "Trunk compilation is broken?
      {code}$ERR{code}"
      cleanupAndExit 1
    fi
  fi
  mv target/checkstyle-result.xml $PATCH_DIR/trunkCheckstyle.xml
  collectFindbugsReports trunk $BASEDIR $PATCH_DIR
}

###############################################################################

@@ -318,6 +402,16 @@ checkTests () {
    return 0
  fi
fi
srcReferences=`${GREP} "diff --git" "${PATCH_DIR}/patch" | ${GREP} "src/main" | \
  ${GREP} -v "src/main/asciidoc" | ${GREP} -v "src/main/site" -c`
if [[ $srcReferences == 0 ]] ; then
  echo "The patch doesn't appear to alter any code that requires tests."
  JIRA_COMMENT="$JIRA_COMMENT

  {color:green}+0 tests included{color}. The patch appears to be a documentation, build,
  or dev-support patch that doesn't require tests."
  return 0
fi
JIRA_COMMENT="$JIRA_COMMENT

  {color:red}-1 tests included{color}. The patch doesn't appear to include any new or modified tests.

@@ -335,21 +429,26 @@ checkTests () {
### Check there are no compilation errors, passing a file to be parsed.
checkCompilationErrors() {
  local file=$1
  hadoopVersion=""
  if [ "$#" -ne 1 ]; then
    hadoopVersion="with Hadoop version $2"
  fi
  COMPILATION_ERROR=false
  eval $(awk '/ERROR/ {print "COMPILATION_ERROR=true"}' $file)
  if $COMPILATION_ERROR ; then
    ERRORS=$($AWK '/ERROR/ { print $0 }' $file)
    echo "======================================================================"
    echo "There are compilation errors."
    echo "There are compilation errors $hadoopVersion."
    echo "======================================================================"
    echo "$ERRORS"
    JIRA_COMMENT="$JIRA_COMMENT

    {color:red}-1 javac{color}. The patch appears to cause mvn compile goal to fail.
    {color:red}-1 javac{color}. The patch appears to cause mvn compile goal to fail $hadoopVersion.

    Compilation errors resume:
    $ERRORS
    "
    submitJiraComment 1
    cleanupAndExit 1
  fi
}

@@ -418,9 +517,8 @@ checkAntiPatterns () {
  if [[ $warnings != "" ]]; then
    JIRA_COMMENT="$JIRA_COMMENT

    {color:red}-1 Anti-pattern{color}. The patch appears to have anti-pattern where BYTES_COMPARATOR was omitted:
    $warnings."
    return 1
    {color:red}-1 Anti-pattern{color}. The patch appears to have anti-pattern where BYTES_COMPARATOR was omitted: $warnings."
    return 1
  fi
  return 0
}

@@ -441,9 +539,8 @@ checkInterfaceAudience () {
  if [[ $warnings != "" ]]; then
    JIRA_COMMENT="$JIRA_COMMENT

    {color:red}-1 InterfaceAudience{color}. The patch appears to contain InterfaceAudience from hadoop rather than hbase:
    $warnings."
    return 1
    {color:red}-1 InterfaceAudience{color}. The patch appears to contain InterfaceAudience from hadoop rather than hbase: $warnings."
    return 1
  fi
  return 0
}

@@ -473,6 +570,9 @@ checkJavadocWarnings () {
  JIRA_COMMENT="$JIRA_COMMENT

  {color:red}-1 javadoc{color}. The javadoc tool appears to have generated `expr $(($javadocWarnings-$OK_JAVADOC_WARNINGS))` warning messages."
  # Add javadoc output url
  JIRA_COMMENT_FOOTER="Javadoc warnings: $BUILD_URL/artifact/patchprocess/patchJavadocWarnings.txt
  $JIRA_COMMENT_FOOTER"
  return 1
fi
JIRA_COMMENT="$JIRA_COMMENT

@@ -481,6 +581,31 @@ checkJavadocWarnings () {
  return 0
}

checkBuildWithHadoopVersions() {
  echo ""
  echo ""
  echo "======================================================================"
  echo "======================================================================"
  echo " Building with all supported Hadoop versions."
  echo "======================================================================"
  echo "======================================================================"
  echo ""
  echo ""
  export MAVEN_OPTS="${MAVEN_OPTS}"
  for HADOOP2_VERSION in $HADOOP2_VERSIONS ; do
    echo "$MVN clean install -DskipTests -D${PROJECT_NAME}PatchProcess -Dhadoop-two.version=$HADOOP2_VERSION > $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt 2>&1"
    $MVN clean install -DskipTests -D${PROJECT_NAME}PatchProcess -Dhadoop-two.version=$HADOOP2_VERSION > $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt 2>&1
    checkCompilationErrors $PATCH_DIR/patchJavacWithHadoop-$HADOOP2_VERSION.txt $HADOOP2_VERSION
  done

  # TODO: add Hadoop3 versions and compilation here when we get the hadoop.profile=3.0 working

  JIRA_COMMENT="$JIRA_COMMENT

  {color:green}+1 hadoop versions{color}. The patch compiles with all supported hadoop versions ($HADOOP2_VERSIONS)"
  return 0
}

###############################################################################
### Check there are no changes in the number of Javac warnings
checkJavacWarnings () {

@@ -506,7 +631,7 @@ checkJavacWarnings () {
  if [[ $patchJavacWarnings -gt $trunkJavacWarnings ]] ; then
    JIRA_COMMENT="$JIRA_COMMENT

    {color:red}-1 javac{color}. The applied patch generated $patchJavacWarnings javac compiler warnings (more than the trunk's current $trunkJavacWarnings warnings)."
    {color:red}-1 javac{color}. The applied patch generated $patchJavacWarnings javac compiler warnings (more than the master's current $trunkJavacWarnings warnings)."
    return 1
  fi
fi

@@ -532,23 +657,25 @@ checkCheckstyleErrors() {
  mv target/checkstyle-result.xml $PATCH_DIR/patchCheckstyle.xml
  mv target/site/checkstyle-aggregate.html $PATCH_DIR
  mv target/site/checkstyle.css $PATCH_DIR
  trunkCheckstyleErrors=`$GREP '<error' $PATCH_DIR/trunkCheckstyle.xml | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
  patchCheckstyleErrors=`$GREP '<error' $PATCH_DIR/patchCheckstyle.xml | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
  if [[ $patchCheckstyleErrors -gt $trunkCheckstyleErrors ]] ; then
    $BASEDIR/dev-support/checkstyle_report.py $PATCH_DIR/trunkCheckstyle.xml $PATCH_DIR/patchCheckstyle.xml
    if [[ $? -eq 1 ]] ; then
      JIRA_COMMENT_FOOTER="Checkstyle Errors: $BUILD_URL/artifact/patchprocess/checkstyle-aggregate.html

      $JIRA_COMMENT_FOOTER"

      JIRA_COMMENT="$JIRA_COMMENT

      {color:red}-1 javac{color}. The applied patch generated $patchCheckstyleErrors checkstyle errors (more than the trunk's current $trunkCheckstyleErrors errors)."
      {color:red}-1 checkstyle{color}. The applied patch generated new checkstyle errors. Check build console for list of new errors."
      return 1
    fi
    echo "There were $patchCheckstyleErrors checkstyle errors in this patch compared to $trunkCheckstyleErrors on master."
  fi
  JIRA_COMMENT_FOOTER="Checkstyle Errors: $BUILD_URL/artifact/patchprocess/checkstyle-aggregate.html

  $JIRA_COMMENT_FOOTER"

  JIRA_COMMENT="$JIRA_COMMENT

  {color:green}+1 javac{color}. The applied patch does not increase the total number of checkstyle errors"
  {color:green}+1 checkstyle{color}. The applied patch does not generate new checkstyle errors."
  return 0

}

@@ -569,7 +696,7 @@ checkProtocErrors () {
  checkProtocCompilationErrors $PATCH_DIR/patchProtocErrors.txt
  JIRA_COMMENT="$JIRA_COMMENT

  {color:green}+1 javac{color}. The applied patch does not increase the total number of javac compiler warnings."
  {color:green}+1 protoc{color}. The applied patch does not increase the total number of protoc compiler warnings."
  return 0
}

@@ -600,7 +727,7 @@ checkReleaseAuditWarnings () {
  if [[ $patchReleaseAuditWarnings -gt $OK_RELEASEAUDIT_WARNINGS ]] ; then
    JIRA_COMMENT="$JIRA_COMMENT

    {color:red}-1 release audit{color}. The applied patch generated $patchReleaseAuditWarnings release audit warnings (more than the trunk's current $OK_RELEASEAUDIT_WARNINGS warnings)."
    {color:red}-1 release audit{color}. The applied patch generated $patchReleaseAuditWarnings release audit warnings (more than the master's current $OK_RELEASEAUDIT_WARNINGS warnings)."
    $GREP '\!?????' $PATCH_DIR/patchReleaseAuditWarnings.txt > $PATCH_DIR/patchReleaseAuditProblems.txt
    echo "Lines that start with ????? in the release audit report indicate files that do not have an Apache license header." >> $PATCH_DIR/patchReleaseAuditProblems.txt
    JIRA_COMMENT_FOOTER="Release audit warnings: $BUILD_URL/artifact/patchprocess/patchReleaseAuditWarnings.txt

@@ -638,41 +765,36 @@ checkFindbugsWarnings () {
  {color:red}-1 findbugs{color}. The patch appears to cause Findbugs (version ${findbugs_version}) to fail."
  return 1
fi

findbugsWarnings=0
for file in $(find $BASEDIR -name findbugsXml.xml)
do
  relative_file=${file#$BASEDIR/} # strip leading $BASEDIR prefix
  if [ ! $relative_file == "target/findbugsXml.xml" ]; then
    module_suffix=${relative_file%/target/findbugsXml.xml} # strip trailing path
    module_suffix=`basename ${module_suffix}`
  fi

  cp $file $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml
  $FINDBUGS_HOME/bin/setBugDatabaseInfo -timestamp "01/01/2000" \
    $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \
    $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml
  newFindbugsWarnings=`$FINDBUGS_HOME/bin/filterBugs -first "01/01/2000" $PATCH_DIR/patchFindbugsWarnings${module_suffix}.xml \
    $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml | $AWK '{print $1}'`
  echo "Found $newFindbugsWarnings Findbugs warnings ($file)"
  findbugsWarnings=$((findbugsWarnings+newFindbugsWarnings))
  $FINDBUGS_HOME/bin/convertXmlToText -html \
    $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.xml \
    $PATCH_DIR/newPatchFindbugsWarnings${module_suffix}.html
  JIRA_COMMENT_FOOTER="Findbugs warnings: $BUILD_URL/artifact/trunk/patchprocess/newPatchFindbugsWarnings${module_suffix}.html
  $JIRA_COMMENT_FOOTER"
done

### if current warnings greater than OK_FINDBUGS_WARNINGS
if [[ $findbugsWarnings -gt $OK_FINDBUGS_WARNINGS ]] ; then
collectFindbugsReports patch $BASEDIR $PATCH_DIR
# these files are generated by collectFindbugsReports(), named with its first argument
patch_xml=$PATCH_DIR/patchFindbugsWarnings.xml
trunk_xml=$PATCH_DIR/trunkFindbugsWarnings.xml
# combine them to one database
combined_xml=$PATCH_DIR/combinedFindbugsWarnings.xml
new_xml=$PATCH_DIR/newFindbugsWarnings.xml
new_html=$PATCH_DIR/newFindbugsWarnings.html
$FINDBUGS_HOME/bin/computeBugHistory -useAnalysisTimes -withMessages \
  -output $combined_xml $trunk_xml $patch_xml
findbugsWarnings=$($FINDBUGS_HOME/bin/filterBugs -first patch $combined_xml $new_xml)
findbugsFixedWarnings=$($FINDBUGS_HOME/bin/filterBugs -fixed patch $combined_xml $new_xml)
$FINDBUGS_HOME/bin/convertXmlToText -html $new_xml $new_html
file $new_xml $new_html
JIRA_COMMENT_FOOTER="Release Findbugs (version ${findbugs_version}) \
  warnings: $BUILD_URL/artifact/patchprocess/newFindbugsWarnings.html
  $JIRA_COMMENT_FOOTER"
### if current warnings greater than 0, fail
if [[ $findbugsWarnings -gt 0 ]] ; then
  JIRA_COMMENT="$JIRA_COMMENT

  {color:red}-1 findbugs{color}. The patch appears to introduce `expr $(($findbugsWarnings-$OK_FINDBUGS_WARNINGS))` new Findbugs (version ${findbugs_version}) warnings."
  {color:red}-1 findbugs{color}. The patch appears to introduce $findbugsWarnings \
    new Findbugs (version ${findbugs_version}) warnings."
  return 1
fi
JIRA_COMMENT="$JIRA_COMMENT

{color:green}+1 findbugs{color}. The patch does not introduce any new Findbugs (version ${findbugs_version}) warnings."
{color:green}+1 findbugs{color}. The patch does not introduce any \
  new Findbugs (version ${findbugs_version}) warnings."
return 0
}

@@ -691,7 +813,7 @@ checkLineLengths () {
  #see http://en.wikipedia.org/wiki/Diff#Unified_format

  MAX_LINE_LENGTH_PATCH=`expr $MAX_LINE_LENGTH + 1`
  lines=`cat $PATCH_DIR/patch | grep "^+" | grep -v "^@@" | grep -v "^+++" | grep -v "import" | grep -v "org.apache.thrift." | grep -v "com.google.protobuf." | grep -v "hbase.protobuf.generated" | awk -v len="$MAX_LINE_LENGTH_PATCH" 'length ($0) > len' | head -n 10`
  lines=`cat $PATCH_DIR/patch | grep "^+" | grep -v "^@@" | grep -v "^+++" | grep -v "import" | grep -v "org.apache.thrift." | grep -v "com.google.protobuf." | grep -v "protobuf.generated" | awk -v len="$MAX_LINE_LENGTH_PATCH" 'length ($0) > len' | head -n 10`
  ll=`echo "$lines" | wc -l`
  if [[ "$ll" -gt "1" ]]; then
    JIRA_COMMENT="$JIRA_COMMENT

@@ -707,12 +829,6 @@ checkLineLengths () {
  return 0
}

zombieCount() {
  # HBase tests have been flagged with an innocuous '-Dhbase.test' just so they can
  # be identified as hbase in a process listing.
  echo `jps -v | grep -e surefirebooter -e '-Dhbase.test' | wc -l`
}

###############################################################################
### Run the tests
runTests () {

@@ -725,57 +841,30 @@ runTests () {
  echo "======================================================================"
  echo ""
  echo ""

  failed_tests=""
  ### Kill any rogue build processes from the last attempt
  condemnedCount=`$PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | $AWK 'BEGIN {total = 0} {total += 1} END {print total}'`
  echo "WARNING: $condemnedCount rogue build processes detected, terminating."
  $PS auxwww | $GREP ${PROJECT_NAME}PatchProcess | $AWK '{print $2}' | /usr/bin/xargs -t -I {} /bin/kill -9 {} > /dev/null
  echo "$MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess"
  echo "$MVN clean test -Dsurefire.rerunFailingTestsCount=2 -P runAllTests -D${PROJECT_NAME}PatchProcess"
  export MAVEN_OPTS="${MAVEN_OPTS}"
  ulimit -a
  $MVN clean test -P runAllTests -D${PROJECT_NAME}PatchProcess
  # Need to export this so the zombie subshell picks up current content
  export JIRA_COMMENT
  $MVN clean test -Dsurefire.rerunFailingTestsCount=2 -P runAllTests -D${PROJECT_NAME}PatchProcess
  if [[ $? != 0 ]] ; then
    ### Find and format names of failed tests
    failed_tests=`find . -name 'TEST*.xml' | xargs $GREP -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-| |g" | sed -e "s|\.xml||g"`

    JIRA_COMMENT="$JIRA_COMMENT

    {color:red}-1 core tests{color}. The patch failed these unit tests:
    {color:red}-1 core tests{color}. The patch failed these unit tests:
    $failed_tests"
    BAD=1
    JIRA_COMMENT=`$BASEDIR/dev-support/zombie-detector.sh ${BUILD_ID}`
  else
    JIRA_COMMENT="$JIRA_COMMENT

    {color:green}+1 core tests{color}. The patch passed unit tests in $modules."
    BAD=0
    JIRA_COMMENT=`$BASEDIR/dev-support/zombie-detector.sh ${BUILD_ID}`
    BAD=$?
  fi
  ZOMBIE_TESTS_COUNT=`zombieCount`
  if [[ $ZOMBIE_TESTS_COUNT != 0 ]] ; then
    # It seems sometimes the tests are not dying immediately. Let's give them 30s
    echo "Suspicious java process found - waiting 30s to see if they are just slow to stop"
    sleep 30
    ZOMBIE_TESTS_COUNT=`zombieCount`
    if [[ $ZOMBIE_TESTS_COUNT != 0 ]] ; then
      echo "There are $ZOMBIE_TESTS_COUNT zombie tests, they should have been killed by surefire but survived"
      echo "************ BEGIN zombies jstack extract"
      # HBase tests have been flagged with an innocuous '-Dhbase.test' just so they can
      # be identified as hbase in a process listing.
      ZB_STACK=`jps -v | grep -e surefirebooter -e '-Dhbase.test' | cut -d ' ' -f 1 | xargs -n 1 jstack | grep ".test" | grep "\.java"`
      jps -v | grep -e surefirebooter -e '-Dhbase.test' | cut -d ' ' -f 1 | xargs -n 1 jstack
      echo "************ END zombies jstack extract"
      JIRA_COMMENT="$JIRA_COMMENT

      {color:red}-1 core zombie tests{color}. There are ${ZOMBIE_TESTS_COUNT} zombie test(s): ${ZB_STACK}"
      BAD=1
      jps -v | grep -e surefirebooter -e '-Dhbase.test' | cut -d ' ' -f 1 | xargs kill -9
    else
      echo "We're ok: there is no zombie test, but some tests took some time to stop"
    fi
  else
    echo "We're ok: there is no zombie test"
  fi
  return $BAD
}

###############################################################################

@@ -791,18 +880,18 @@ checkSiteXml () {
  echo ""
  echo ""

  echo "$MVN package site -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchSiteOutput.txt 2>&1"
  echo "$MVN package post-site -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchSiteOutput.txt 2>&1"
  export MAVEN_OPTS="${MAVEN_OPTS}"
  $MVN package site -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchSiteOutput.txt 2>&1
  $MVN package post-site -DskipTests -D${PROJECT_NAME}PatchProcess > $PATCH_DIR/patchSiteOutput.txt 2>&1
  if [[ $? != 0 ]] ; then
    JIRA_COMMENT="$JIRA_COMMENT

    {color:red}-1 site{color}. The patch appears to cause mvn site goal to fail.
    {color:red}-1 site{color}. The patch appears to cause mvn post-site goal to fail."
    return 1
  fi
  JIRA_COMMENT="$JIRA_COMMENT

  {color:green}+1 site{color}. The mvn site goal succeeds with this patch."
  {color:green}+1 site{color}. The mvn post-site goal succeeds with this patch."
  return 0
}

@@ -883,8 +972,9 @@ $comment"
### Cleanup files
cleanupAndExit () {
  local result=$1
  if [[ $JENKINS == "true" ]] ; then
  if [[ ${JENKINS} == "true" && ${MOVE_PATCH_DIR} == "true" ]] ; then
    if [ -e "$PATCH_DIR" ] ; then
      echo "Relocating patch dir into ${BASEDIR}"
      mv $PATCH_DIR $BASEDIR
    fi
  fi

@@ -913,8 +1003,10 @@ This message is automatically generated."
parseArgs $@
cd $BASEDIR

echo "Version of this script: Wed Oct 14 00:29:04 PDT 2015"
checkout
RESULT=$?
echo "RESULT = " $RESULT
if [[ $JENKINS == "true" ]] ; then
  if [[ $RESULT != 0 ]] ; then
    exit 100

@@ -923,8 +1015,10 @@ fi
setup
checkAuthor
RESULT=$?
echo "RESULT = " $RESULT
checkTests
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
applyPatch
if [[ $? != 0 ]] ; then
  submitJiraComment 1

@@ -933,28 +1027,42 @@ fi

checkAntiPatterns
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkBuildWithHadoopVersions
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkJavacWarnings
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkProtocErrors
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkJavadocWarnings
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkCheckstyleErrors
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkInterfaceAudience
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkFindbugsWarnings
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkReleaseAuditWarnings
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkLineLengths
(( RESULT = RESULT + $? ))
echo "RESULT = " $RESULT
checkSiteXml
(( RESULT = RESULT + $?))
### Do not call these when run by a developer
echo "RESULT = " $RESULT
### Do not call these when run by a developer
if [[ $JENKINS == "true" ]] ; then
  runTests
  (( RESULT = RESULT + $? ))
  echo "RESULT = " $RESULT
  JIRA_COMMENT_FOOTER="Test results: $BUILD_URL/testReport/
  $JIRA_COMMENT_FOOTER"
fi
@@ -0,0 +1,166 @@
#!/usr/bin/env bash
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.

# Looks for any running zombies left over from old build runs.
# Will report and try to do stack trace on stale processes so we can
# figure out how they are hung. Echoes state as the script runs
# on STDERR but prints final output on STDOUT formatted so it
# will fold into the test result formatting done by test-patch.sh.
# This script is called from test-patch.sh but also after tests
# have run up on builds.apache.org.

# TODO: format output to suit context -- test-patch, jenkins or dev env

#set -x
# printenv

### Setup some variables.
bindir=$(dirname $0)

# This key is set by our surefire configuration up in the main pom.xml
# This key needs to match the key we set up there.
HBASE_BUILD_ID_KEY="hbase.build.id="
JENKINS=

PS=${PS:-ps}
AWK=${AWK:-awk}
WGET=${WGET:-wget}
GREP=${GREP:-grep}
JIRACLI=${JIRA:-jira}

###############################################################################
printUsage() {
  echo "Usage: $0 [options]" BUILD_ID
  echo
  echo "Where:"
  echo " BUILD_ID is build id to look for in process listing"
  echo
  echo "Options:"
  echo "--ps-cmd=<cmd> The 'ps' command to use (default 'ps')"
  echo "--awk-cmd=<cmd> The 'awk' command to use (default 'awk')"
  echo "--grep-cmd=<cmd> The 'grep' command to use (default 'grep')"
  echo
  echo "Jenkins-only options:"
  echo "--jenkins Run by Jenkins (runs tests and posts results to JIRA)"
  echo "--wget-cmd=<cmd> The 'wget' command to use (default 'wget')"
  echo "--jira-cmd=<cmd> The 'jira' command to use (default 'jira')"
}

###############################################################################
parseArgs() {
  for i in $*
  do
    case $i in
      --jenkins)
        JENKINS=true
        ;;
      --ps-cmd=*)
        PS=${i#*=}
        ;;
      --awk-cmd=*)
        AWK=${i#*=}
        ;;
      --wget-cmd=*)
        WGET=${i#*=}
        ;;
      --grep-cmd=*)
        GREP=${i#*=}
        ;;
      --jira-cmd=*)
        JIRACLI=${i#*=}
        ;;
      *)
        BUILD_ID=$i
        ;;
    esac
  done
  if [ -z "$BUILD_ID" ]; then
    printUsage
    exit 1
  fi
}

### Return list of the processes found with passed build id.
find_processes () {
  jps -v | grep surefirebooter | grep -e "${HBASE_BUILD_TAG}"
}

### Look for zombies
zombies () {
  ZOMBIES=`find_processes`
  if [[ -z ${ZOMBIES} ]]
  then
    ZOMBIE_TESTS_COUNT=0
  else
    ZOMBIE_TESTS_COUNT=`echo "${ZOMBIES}"| wc -l| xargs`
  fi
  if [[ $ZOMBIE_TESTS_COUNT != 0 ]] ; then
    wait=30
    echo "`date` Found ${ZOMBIE_TESTS_COUNT} suspicious java process(es) listed below; waiting ${wait}s to see if just slow to stop" >&2
    echo ${ZOMBIES} >&2
    sleep ${wait}
    PIDS=`echo "${ZOMBIES}"|${AWK} '{print $1}'`
    ZOMBIE_TESTS_COUNT=0
    for pid in $PIDS
    do
      # Test our zombie is still running (and that it is still an hbase build item)
      PS_OUTPUT=`ps -p $pid | tail +2 | grep -e "${HBASE_BUILD_TAG}"`
      if [[ ! -z "${PS_OUTPUT}" ]]
      then
        echo "`date` Zombie: $PS_OUTPUT" >&2
        let "ZOMBIE_TESTS_COUNT+=1"
        PS_STACK=`jstack $pid | grep -e "\.Test" | grep -e "\.java"| head -3`
        echo "${PS_STACK}" >&2
        ZB_STACK="${ZB_STACK}\nPID=${pid} ${PS_STACK}"
      fi
    done
    if [[ $ZOMBIE_TESTS_COUNT != 0 ]]
    then
      echo "`date` There are ${ZOMBIE_TESTS_COUNT} possible zombie test(s)." >&2
      # If JIRA_COMMENT in environment, append our findings to it
      echo -e "$JIRA_COMMENT

      {color:red}+1 zombies{color}. There are ${ZOMBIE_TESTS_COUNT} possible zombie test(s)
      ${ZB_STACK}"
      # Exit with exit code of 1.
      exit 1
    else
      echo "`date` We're ok: there was a zombie candidate but it went away" >&2
      echo "$JIRA_COMMENT

      {color:green}+1 zombies{color}. No zombie tests found running at the end of the build (There were candidates but they seem to have gone away)."
    fi
  else
    echo "`date` We're ok: there is no zombie test" >&2
    echo "$JIRA_COMMENT

    {color:green}+1 zombies{color}. No zombie tests found running at the end of the build."
  fi
}

### Check if arguments to the script have been specified properly or not
parseArgs $@
HBASE_BUILD_TAG="${HBASE_BUILD_ID_KEY}${BUILD_ID}"
zombies
RESULT=$?
if [[ $JENKINS == "true" ]] ; then
  if [[ $RESULT != 0 ]] ; then
    exit 100
  fi
fi
RESULT=$?
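A typical call, sketched here with a made-up build number, is for test-patch.sh (or a post-test Jenkins step) to pass in the current build id so the script only matches surefire forks tagged with that id:

  ./dev-support/zombie-detector.sh --jenkins 12345

With that argument the script greps the `jps -v` listing for surefirebooter processes carrying hbase.build.id=12345, waits 30 seconds, re-checks the survivors with ps, and prints a short jstack excerpt for anything still alive.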
@@ -37,6 +37,7 @@
<include>org.apache.hbase:hbase-client</include>
<include>org.apache.hbase:hbase-common</include>
<include>org.apache.hbase:hbase-examples</include>
<include>org.apache.hbase:hbase-external-blockcache</include>
<include>org.apache.hbase:hbase-hadoop2-compat</include>
<include>org.apache.hbase:hbase-hadoop-compat</include>
<include>org.apache.hbase:hbase-it</include>

@@ -47,8 +48,6 @@
<include>org.apache.hbase:hbase-resource-bundle</include>
<include>org.apache.hbase:hbase-server</include>
<include>org.apache.hbase:hbase-shaded</include>
<include>org.apache.hbase:hbase-shaded-client</include>
<include>org.apache.hbase:hbase-shaded-server</include>
<include>org.apache.hbase:hbase-shell</include>
<include>org.apache.hbase:hbase-testing-util</include>
<include>org.apache.hbase:hbase-thrift</include>

@@ -32,5 +32,6 @@
-->
<suppressions>
  <suppress checks="." files=".*/generated/.*\.java"/>
  <suppress checks="." files=".*/generated-jamon/.*\.java"/>
  <suppress checks="MagicNumberCheck" files=".*Test\.java"/>
</suppressions>
@@ -0,0 +1,35 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

/**
 * Exception thrown when the result needs to be chunked on the server side.
 * It signals that retries should happen right away and not count against the number of
 * retries because some of the multi was a success.
 */
@InterfaceAudience.Public
@InterfaceStability.Evolving
public class MultiActionResultTooLarge extends RetryImmediatelyException {
  public MultiActionResultTooLarge(String s) {
    super(s);
  }
}

@@ -0,0 +1,31 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase;

import java.io.IOException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;

@InterfaceAudience.Public
@InterfaceStability.Evolving
public class RetryImmediatelyException extends IOException {
  public RetryImmediatelyException(String s) {
    super(s);
  }
}
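To make the intent of these two classes concrete, here is a minimal sketch of how a caller can treat them; callServer(), backoff() and maxAttempts are stand-ins for the real AsyncProcess machinery, not HBase API:

  // Sketch: a RetryImmediatelyException (e.g. MultiActionResultTooLarge) is retried
  // at once, without backoff and without consuming one of the bounded attempts.
  Result callWithRetries() throws IOException, InterruptedException {
    int attempt = 0;
    while (true) {
      try {
        return callServer();
      } catch (RetryImmediatelyException e) {
        continue;                       // chunked response: retry right away
      } catch (IOException e) {
        if (++attempt >= maxAttempts) {
          throw e;                      // ordinary failure: bounded, backed-off retries
        }
        Thread.sleep(backoff(attempt));
      }
    }
  }

The actual wiring into AsyncProcess (zero backoff and an unchanged attempt counter) appears in the hunks below.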
@@ -41,6 +41,7 @@ import java.util.concurrent.atomic.AtomicLong;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.RetryImmediatelyException;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.DoNotRetryIOException;

@@ -126,19 +127,36 @@ class AsyncProcess {
  public void waitUntilDone() throws InterruptedIOException;
}

/** Return value from a submit that didn't contain any requests. */
/**
 * Return value from a submit that didn't contain any requests.
 */
private static final AsyncRequestFuture NO_REQS_RESULT = new AsyncRequestFuture() {
  public final Object[] result = new Object[0];

  final Object[] result = new Object[0];

  @Override
  public boolean hasError() { return false; }
  public boolean hasError() {
    return false;
  }

  @Override
  public RetriesExhaustedWithDetailsException getErrors() { return null; }
  public RetriesExhaustedWithDetailsException getErrors() {
    return null;
  }

  @Override
  public List<? extends Row> getFailedOperations() { return null; }
  public List<? extends Row> getFailedOperations() {
    return null;
  }

  @Override
  public Object[] getResults() { return result; }
  public Object[] getResults() {
    return result;
  }

  @Override
  public void waitUntilDone() throws InterruptedIOException {}
  public void waitUntilDone() throws InterruptedIOException {
  }
};

/** Sync point for calls to multiple replicas for the same user request (Get).

@@ -306,8 +324,12 @@ class AsyncProcess {
 * RuntimeException
 */
private ExecutorService getPool(ExecutorService pool) {
  if (pool != null) return pool;
  if (this.pool != null) return this.pool;
  if (pool != null) {
    return pool;
  }
  if (this.pool != null) {
    return this.pool;
  }
  throw new RuntimeException("Neither AsyncProcess nor request have ExecutorService");
}

@@ -365,7 +387,9 @@ class AsyncProcess {
  Row r = it.next();
  HRegionLocation loc;
  try {
    if (r == null) throw new IllegalArgumentException("#" + id + ", row cannot be null");
    if (r == null) {
      throw new IllegalArgumentException("#" + id + ", row cannot be null");
    }
    // Make sure we get 0-s replica.
    RegionLocations locs = connection.locateRegion(
      tableName, r.getRow(), true, true, RegionReplicaUtil.DEFAULT_REPLICA_ID);

@@ -728,10 +752,10 @@ class AsyncProcess {
  // Normal case: we received an answer from the server, and it's not an exception.
  receiveMultiAction(multiAction, server, res, numAttempt);
} catch (Throwable t) {
  // Something really bad happened. We are on the send thread that will now die.
  LOG.error("Internal AsyncProcess #" + id + " error for "
    + tableName + " processing for " + server, t);
  throw new RuntimeException(t);
  // Something really bad happened. We are on the send thread that will now die.
  LOG.error("Internal AsyncProcess #" + id + " error for "
    + tableName + " processing for " + server, t);
  throw new RuntimeException(t);
} finally {
  decTaskCounters(multiAction.getRegions(), server);
  if (callsInProgress != null && callable != null) {

@@ -750,19 +774,25 @@ class AsyncProcess {

private final TableName tableName;
private final AtomicLong actionsInProgress = new AtomicLong(-1);
/** The lock controls access to results. It is only held when populating results where
/**
 * The lock controls access to results. It is only held when populating results where
 * there might be several callers (eventual consistency gets). For other requests,
 * there's one unique call going on per result index. */
 * there's one unique call going on per result index.
 */
private final Object replicaResultLock = new Object();
/** Result array. Null if results are not needed. Otherwise, each index corresponds to
/**
 * Result array. Null if results are not needed. Otherwise, each index corresponds to
 * the action index in initial actions submitted. For most request types, has null-s for
 * requests that are not done, and result/exception for those that are done.
 * For eventual-consistency gets, initially the same applies; at some point, replica calls
 * might be started, and ReplicaResultState is put at the corresponding indices. The
 * returning calls check the type to detect when this is the case. After all calls are done,
 * ReplicaResultState-s are replaced with results for the user. */
 * ReplicaResultState-s are replaced with results for the user.
 */
private final Object[] results;
/** Indices of replica gets in results. If null, all or no actions are replica-gets. */
/**
 * Indices of replica gets in results. If null, all or no actions are replica-gets.
 */
private final int[] replicaGetIndices;
private final boolean hasAnyReplicaGets;
private final long nonceGroup;

@@ -777,7 +807,9 @@ class AsyncProcess {
  this.actionsInProgress.set(actions.size());
  if (results != null) {
    assert needResults;
    if (results.length != actions.size()) throw new AssertionError("results.length");
    if (results.length != actions.size()) {
      throw new AssertionError("results.length");
    }
    this.results = results;
    for (int i = 0; i != this.results.length; ++i) {
      results[i] = null;

@@ -1177,9 +1209,13 @@ class AsyncProcess {
  // We have two contradicting needs here:
  //  1) We want to get the new location after having slept, as it may change.
  //  2) We want to take into account the location when calculating the sleep time.
  //  3) If all this is just because the response needed to be chunked try again FAST.
  // It should be possible to have some heuristics to take the right decision. Short term,
  // we go for one.
  long backOffTime = errorsByServer.calculateBackoffTime(oldServer, pause);
  boolean retryImmediately = throwable instanceof RetryImmediatelyException;
  int nextAttemptNumber = retryImmediately ? numAttempt : numAttempt + 1;
  long backOffTime = retryImmediately ? 0 :
    errorsByServer.calculateBackoffTime(oldServer, pause);
  if (numAttempt > startLogErrorsCnt) {
    // We use this value to have some logs when we have multiple failures, but not too many
    // logs, as errors are to be expected when a region moves, splits and so on

@@ -1188,14 +1224,16 @@ class AsyncProcess {
  }

  try {
    Thread.sleep(backOffTime);
    if (backOffTime > 0) {
      Thread.sleep(backOffTime);
    }
  } catch (InterruptedException e) {
    LOG.warn("#" + id + ", not sent: " + toReplay.size() + " operations, " + oldServer, e);
    Thread.currentThread().interrupt();
    return;
  }

  groupAndSendMultiAction(toReplay, numAttempt + 1);
  groupAndSendMultiAction(toReplay, nextAttemptNumber);
}

private void logNoResubmit(ServerName oldServer, int numAttempt,

@@ -1255,6 +1293,7 @@ class AsyncProcess {
  // Failure: retry if it makes sense, else update the errors lists
  if (result == null || result instanceof Throwable) {
    Row row = sentAction.getAction();
    throwable = ConnectionManager.findException(result);
    // Register corresponding failures once per server/once per region.
    if (!regionFailureRegistered) {
      regionFailureRegistered = true;

@@ -1404,7 +1443,9 @@ class AsyncProcess {
  // will either see state with callCount 0 after locking it; or will not see state at all
  // we will replace it with the result.
  synchronized (state) {
    if (state.callCount == 0) return; // someone already set the result
    if (state.callCount == 0) {
      return; // someone already set the result
    }
    state.callCount = 0;
  }
  synchronized (replicaResultLock) {

@@ -760,13 +760,13 @@ public class ClientScanner extends AbstractClientScanner {
public boolean renewLease() {
  if (callable != null) {
    // do not return any rows, do not advance the scanner
    callable.setCaching(0);
    callable.setRenew(true);
    try {
      this.caller.callWithoutRetries(callable, this.scannerTimeout);
    } catch (Exception e) {
      return false;
    } finally {
      callable.setCaching(this.caching);
      callable.setRenew(false);
    }
    return true;
  }
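renewLease() above issues a scan RPC with caching set to 0 and the new renew flag, so the server only refreshes the scanner lease and no rows are consumed. A possible caller, sketched with made-up names (slowSink is not HBase API), is a client whose per-row processing can outlast the lease timeout:

  // Sketch: ping the scanner between rows so the region server does not expire the lease.
  ClientScanner scanner = (ClientScanner) table.getScanner(scan);
  try {
    for (Result row : scanner) {
      slowSink.process(row);   // may take longer than hbase.client.scanner.timeout.period
      scanner.renewLease();    // caching=0 + renew=true: lease refresh only, position unchanged
    }
  } finally {
    scanner.close();
  }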
@ -58,6 +58,7 @@ import org.apache.hadoop.hbase.MasterNotRunningException;
|
|||
import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||
import org.apache.hadoop.hbase.RegionLocations;
|
||||
import org.apache.hadoop.hbase.RegionTooBusyException;
|
||||
import org.apache.hadoop.hbase.RetryImmediatelyException;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.TableNotEnabledException;
|
||||
|
@ -2714,7 +2715,8 @@ class ConnectionManager {
|
|||
Throwable cur = (Throwable) exception;
|
||||
while (cur != null) {
|
||||
if (cur instanceof RegionMovedException || cur instanceof RegionOpeningException
|
||||
|| cur instanceof RegionTooBusyException || cur instanceof ThrottlingException) {
|
||||
|| cur instanceof RegionTooBusyException || cur instanceof ThrottlingException
|
||||
|| cur instanceof RetryImmediatelyException) {
|
||||
return cur;
|
||||
}
|
||||
if (cur instanceof RemoteException) {
|
||||
|
|
|
@ -20,6 +20,8 @@ package org.apache.hadoop.hbase.client;
|
|||
|
||||
import java.io.IOException;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
|
@ -31,7 +33,6 @@ import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionRequest
|
|||
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.FlushRegionResponse;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;
|
||||
import org.mortbay.log.Log;
|
||||
|
||||
import com.google.protobuf.ServiceException;
|
||||
|
||||
|
@ -41,6 +42,8 @@ import com.google.protobuf.ServiceException;
|
|||
@InterfaceAudience.Private
|
||||
public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionResponse> {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(FlushRegionCallable.class);
|
||||
|
||||
private final byte[] regionName;
|
||||
private final boolean writeFlushWalMarker;
|
||||
private boolean reload;
|
||||
|
@ -78,7 +81,7 @@ public class FlushRegionCallable extends RegionAdminServiceCallable<FlushRegionR
|
|||
if (!reload) {
|
||||
throw new IOException("Cached location seems to be different than requested region.");
|
||||
}
|
||||
Log.info("Skipping flush region, because the located region "
|
||||
LOG.info("Skipping flush region, because the located region "
|
||||
+ Bytes.toStringBinary(location.getRegionInfo().getRegionName()) + " is different than "
|
||||
+ " requested region " + Bytes.toStringBinary(regionName));
|
||||
return FlushRegionResponse.newBuilder()
|
||||
|
|
|
@@ -756,7 +756,7 @@ public class HTable implements HTableInterface, RegionLocator {
try {
ClientProtos.GetResponse response = getStub().get(controller, request);
if (!response.hasResult()) return null;
return ProtobufUtil.toResult(response.getResult());
return ProtobufUtil.toResult(response.getResult(), controller.cellScanner());
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}

@@ -861,7 +861,7 @@ public class HTable implements HTableInterface, RegionLocator {
try {
ClientProtos.GetResponse response = getStub().get(controller, request);
if (response == null) return null;
return ProtobufUtil.toResult(response.getResult());
return ProtobufUtil.toResult(response.getResult(), controller.cellScanner());
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}

@@ -238,7 +238,7 @@ public abstract class Mutation extends OperationWithAttributes implements Row, C
*/
@Deprecated
public boolean getWriteToWAL() {
return this.durability == Durability.SKIP_WAL;
return this.durability != Durability.SKIP_WAL;
}

/**

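The corrected check above makes getWriteToWAL() report true whenever the mutation's durability is anything other than SKIP_WAL. A minimal, hypothetical illustration of the expected behaviour (not part of the patch; assumes the usual Put and Durability client classes):

    Put put = new Put(Bytes.toBytes("row"));
    put.setDurability(Durability.SKIP_WAL);
    assert !put.getWriteToWAL();   // WAL is skipped, so this now returns false
    put.setDurability(Durability.SYNC_WAL);
    assert put.getWriteToWAL();    // any durability other than SKIP_WAL writes to the WAL
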
@@ -913,6 +913,9 @@ public class Result implements CellScannable, CellScanner {
*/
public static long getTotalSizeOfCells(Result result) {
long size = 0;
if (result.isEmpty()) {
return size;
}
for (Cell c : result.rawCells()) {
size += CellUtil.estimatedHeapSizeOf(c);
}

@@ -164,7 +164,7 @@ public class RpcRetryingCallerWithReadReplicas {
if (response == null) {
return null;
}
return ProtobufUtil.toResult(response.getResult());
return ProtobufUtil.toResult(response.getResult(), controller.cellScanner());
} catch (ServiceException se) {
throw ProtobufUtil.getRemoteException(se);
}

@@ -72,6 +72,7 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
protected long scannerId = -1L;
protected boolean instantiated = false;
protected boolean closed = false;
protected boolean renew = false;
private Scan scan;
private int caching = 1;
protected final ClusterConnection cConnection;

@@ -209,7 +210,7 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
incRPCcallsMetrics();
request =
RequestConverter.buildScanRequest(scannerId, caching, false, nextCallSeq,
this.scanMetrics != null);
this.scanMetrics != null, renew);
ScanResponse response = null;
controller = controllerFactory.newController();
controller.setPriority(getTableName());

@@ -413,6 +414,15 @@ public class ScannerCallable extends RegionServerCallable<Result[]> {
this.closed = true;
}

/**
* Indicate whether we make a call only to renew the lease, but without affecting the scanner in
* any other way.
* @param val true if only the lease should be renewed
*/
public void setRenew(boolean val) {
this.renew = val;
}

/**
* @return the HRegionInfo for the current region
*/

@@ -98,6 +98,10 @@ class ScannerCallableWithReplicas implements RetryingCallable<Result[]> {
currentScannerCallable.setClose();
}

public void setRenew(boolean val) {
currentScannerCallable.setRenew(val);
}

public void setCaching(int caching) {
currentScannerCallable.setCaching(caching);
}

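The renew flag above lets a caller send a scan RPC whose only effect is to renew the server-side scanner lease. A hedged sketch of how surrounding scanner code might drive it; the caller and timeout arguments are assumed to exist in that code, and this is illustrative rather than the actual heartbeat implementation:

    void renewLeaseOnly(ScannerCallableWithReplicas callable,
        RpcRetryingCaller<Result[]> caller, int timeout) throws IOException {
      callable.setRenew(true);     // the next RPC carries renew=true and returns no rows
      try {
        caller.callWithRetries(callable, timeout);
      } finally {
        callable.setRenew(false);  // later calls fetch data again
      }
    }
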
@@ -56,7 +56,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeerZKImpl;
import org.apache.hadoop.hbase.replication.ReplicationPeers;
import org.apache.hadoop.hbase.replication.ReplicationQueuesClient;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;

@@ -601,7 +600,17 @@ public class ReplicationAdmin implements Closeable {
if (repPeers == null || repPeers.size() <= 0) {
throw new IllegalArgumentException("Found no peer cluster for replication.");
}

final TableName onlyTableNameQualifier = TableName.valueOf(tableName.getQualifierAsString());

for (ReplicationPeer repPeer : repPeers) {
Map<TableName, List<String>> tableCFMap = repPeer.getTableCFs();
// TODO Currently peer TableCFs will not include namespace so we need to check only for table
// name without namespace in it. Need to correct this logic once we fix HBASE-11386.
if (tableCFMap != null && !tableCFMap.containsKey(onlyTableNameQualifier)) {
continue;
}

Configuration peerConf = repPeer.getConfiguration();
HTableDescriptor htd = null;
try (Connection conn = ConnectionFactory.createConnection(peerConf);

@@ -627,7 +636,8 @@ public class ReplicationAdmin implements Closeable {
}
}

private List<ReplicationPeer> listValidReplicationPeers() {
@VisibleForTesting
List<ReplicationPeer> listValidReplicationPeers() {
Map<String, ReplicationPeerConfig> peers = listPeerConfigs();
if (peers == null || peers.size() <= 0) {
return null;

@@ -635,18 +645,17 @@ public class ReplicationAdmin implements Closeable {
List<ReplicationPeer> validPeers = new ArrayList<ReplicationPeer>(peers.size());
for (Entry<String, ReplicationPeerConfig> peerEntry : peers.entrySet()) {
String peerId = peerEntry.getKey();
String clusterKey = peerEntry.getValue().getClusterKey();
Configuration peerConf = new Configuration(this.connection.getConfiguration());
Stat s = null;
try {
ZKUtil.applyClusterKeyToConf(peerConf, clusterKey);
Pair<ReplicationPeerConfig, Configuration> pair = this.replicationPeers.getPeerConf(peerId);
ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst());
Configuration peerConf = pair.getSecond();
ReplicationPeer peer = new ReplicationPeerZKImpl(peerConf, peerId, pair.getFirst(),
parseTableCFsFromConfig(this.getPeerTableCFs(peerId)));
s =
zkw.getRecoverableZooKeeper().exists(peerConf.get(HConstants.ZOOKEEPER_ZNODE_PARENT),
null);
if (null == s) {
LOG.info(peerId + ' ' + clusterKey + " is invalid now.");
LOG.info(peerId + ' ' + pair.getFirst().getClusterKey() + " is invalid now.");
continue;
}
validPeers.add(peer);

@@ -664,10 +673,6 @@ public class ReplicationAdmin implements Closeable {
LOG.warn("Failed to get valid replication peers due to InterruptedException.");
LOG.debug("Failure details to get valid replication peers.", e);
continue;
} catch (IOException e) {
LOG.warn("Failed to get valid replication peers due to IOException.");
LOG.debug("Failure details to get valid replication peers.", e);
continue;
}
}
return validPeers;

@@ -93,7 +93,7 @@ public class FuzzyRowFilter extends FilterBase {
}

private void preprocessSearchKey(Pair<byte[], byte[]> p) {
if (UnsafeAccess.isAvailable() == false) {
if (UnsafeAccess.unaligned() == false) {
return;
}
byte[] key = p.getFirst();

@@ -111,7 +111,7 @@ public class FuzzyRowFilter extends FilterBase {
* @return mask array
*/
private byte[] preprocessMask(byte[] mask) {
if (UnsafeAccess.isAvailable() == false) {
if (UnsafeAccess.unaligned() == false) {
return mask;
}
if (isPreprocessedMask(mask)) return mask;

@@ -320,7 +320,7 @@ public class FuzzyRowFilter extends FilterBase {
static SatisfiesCode satisfies(boolean reverse, byte[] row, int offset, int length,
byte[] fuzzyKeyBytes, byte[] fuzzyKeyMeta) {

if (UnsafeAccess.isAvailable() == false) {
if (UnsafeAccess.unaligned() == false) {
return satisfiesNoUnsafe(reverse, row, offset, length, fuzzyKeyBytes, fuzzyKeyMeta);
}

@@ -24,6 +24,13 @@ import com.google.protobuf.Descriptors;
import com.google.protobuf.Message;
import com.google.protobuf.RpcController;
import com.google.protobuf.ServiceException;

import java.io.IOException;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;

@@ -34,6 +41,7 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.client.MetricsConnection;
import org.apache.hadoop.hbase.codec.Codec;
import org.apache.hadoop.hbase.codec.KeyValueCodec;
import org.apache.hadoop.hbase.exceptions.ConnectionClosingException;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.util.EnvironmentEdgeManager;

@@ -41,10 +49,6 @@ import org.apache.hadoop.hbase.util.Pair;
import org.apache.hadoop.hbase.util.PoolMap;
import org.apache.hadoop.io.compress.CompressionCodec;

import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.SocketAddress;

/**
* Provides the basics for a RpcClient implementation like configuration and Logging.
*/

@@ -257,6 +261,33 @@ public abstract class AbstractRpcClient implements RpcClient {
return new BlockingRpcChannelImplementation(this, sn, ticket, defaultOperationTimeout);
}

/**
* Takes an Exception and the address we were trying to connect to and return an IOException with
* the input exception as the cause. The new exception provides the stack trace of the place where
* the exception is thrown and some extra diagnostics information. If the exception is
* ConnectException or SocketTimeoutException, return a new one of the same type; Otherwise return
* an IOException.
* @param addr target address
* @param exception the relevant exception
* @return an exception to throw
*/
protected IOException wrapException(InetSocketAddress addr, Exception exception) {
if (exception instanceof ConnectException) {
// connection refused; include the host:port in the error
return (ConnectException) new ConnectException("Call to " + addr
+ " failed on connection exception: " + exception).initCause(exception);
} else if (exception instanceof SocketTimeoutException) {
return (SocketTimeoutException) new SocketTimeoutException("Call to " + addr
+ " failed because " + exception).initCause(exception);
} else if (exception instanceof ConnectionClosingException) {
return (ConnectionClosingException) new ConnectionClosingException("Call to " + addr
+ " failed on local exception: " + exception).initCause(exception);
} else {
return (IOException) new IOException("Call to " + addr + " failed on local exception: "
+ exception).initCause(exception);
}
}

/**
* Blocking rpc channel that goes via hbase rpc.
*/

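The new protected wrapException helper gives every AbstractRpcClient subclass one place to turn low-level connection failures into exceptions that name the target address. A hedged sketch of a call site; connectAndCall and remoteAddress are hypothetical stand-ins for subclass code:

    try {
      connectAndCall();                        // hypothetical I/O that may fail
    } catch (ConnectException | SocketTimeoutException e) {
      throw wrapException(remoteAddress, e);   // rethrown as the same type, prefixed with "Call to <addr>"
    }
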
@@ -251,10 +251,11 @@ public class AsyncRpcClient extends AbstractRpcClient {
if (e.getCause() instanceof IOException) {
throw (IOException) e.getCause();
} else {
throw new IOException(e.getCause());
throw wrapException(addr, (Exception) e.getCause());
}
} catch (TimeoutException e) {
throw new CallTimeoutException(promise.toString());
CallTimeoutException cte = new CallTimeoutException(promise.toString());
throw wrapException(addr, cte);
}
}

@@ -19,11 +19,37 @@

package org.apache.hadoop.hbase.ipc;

import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.Descriptors.MethodDescriptor;
import com.google.protobuf.Message;
import com.google.protobuf.Message.Builder;
import com.google.protobuf.RpcCallback;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;

import javax.net.SocketFactory;
import javax.security.sasl.SaslException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

@@ -70,37 +96,11 @@ import org.apache.htrace.Span;
import org.apache.htrace.Trace;
import org.apache.htrace.TraceScope;

import javax.net.SocketFactory;
import javax.security.sasl.SaslException;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.Closeable;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.net.ConnectException;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketTimeoutException;
import java.net.UnknownHostException;
import java.nio.ByteBuffer;
import java.security.PrivilegedExceptionAction;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Random;
import java.util.Set;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.Descriptors.MethodDescriptor;
import com.google.protobuf.Message;
import com.google.protobuf.Message.Builder;
import com.google.protobuf.RpcCallback;

/**
* Does RPC against a cluster. Manages connections per regionserver in the cluster.

@@ -1266,36 +1266,6 @@ public class RpcClientImpl extends AbstractRpcClient {
}


/**
* Take an IOException and the address we were trying to connect to
* and return an IOException with the input exception as the cause.
* The new exception provides the stack trace of the place where
* the exception is thrown and some extra diagnostics information.
* If the exception is ConnectException or SocketTimeoutException,
* return a new one of the same type; Otherwise return an IOException.
*
* @param addr target address
* @param exception the relevant exception
* @return an exception to throw
*/
protected IOException wrapException(InetSocketAddress addr,
IOException exception) {
if (exception instanceof ConnectException) {
//connection refused; include the host:port in the error
return (ConnectException)new ConnectException(
"Call to " + addr + " failed on connection exception: " + exception).initCause(exception);
} else if (exception instanceof SocketTimeoutException) {
return (SocketTimeoutException)new SocketTimeoutException("Call to " + addr +
" failed because " + exception).initCause(exception);
} else if (exception instanceof ConnectionClosingException){
return (ConnectionClosingException) new ConnectionClosingException(
"Call to " + addr + " failed on local exception: " + exception).initCause(exception);
} else {
return (IOException)new IOException("Call to " + addr + " failed on local exception: " +
exception).initCause(exception);
}
}

/**
* Interrupt the connections to the given ip:port server. This should be called if the server
* is known as actually dead. This will not prevent current operation to be retried, and,

@@ -19,6 +19,8 @@ package org.apache.hadoop.hbase.ipc;

import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.CellScannable;
import org.apache.hadoop.hbase.CellScanner;

@@ -30,7 +32,13 @@ import org.apache.hadoop.hbase.util.ReflectionUtils;
*/
@InterfaceAudience.Private
public class RpcControllerFactory {
private static final Log LOG = LogFactory.getLog(RpcControllerFactory.class);

/**
* Custom RPC Controller factory allows frameworks to change the RPC controller. If the configured
* controller cannot be found in the classpath or loaded, we fall back to the default RPC
* controller factory.
*/
public static final String CUSTOM_CONTROLLER_CONF_KEY = "hbase.rpc.controllerfactory.class";
protected final Configuration conf;

@@ -55,7 +63,21 @@ public class RpcControllerFactory {
String rpcControllerFactoryClazz =
configuration.get(CUSTOM_CONTROLLER_CONF_KEY,
RpcControllerFactory.class.getName());
return ReflectionUtils.instantiateWithCustomCtor(rpcControllerFactoryClazz,
new Class[] { Configuration.class }, new Object[] { configuration });
try {
return ReflectionUtils.instantiateWithCustomCtor(rpcControllerFactoryClazz,
new Class[] { Configuration.class }, new Object[] { configuration });
} catch (UnsupportedOperationException | NoClassDefFoundError ex) {
// HBASE-14960: In case the RPCController is in a non-HBase jar (Phoenix), but the application
// is a pure HBase application, we want to fallback to the default one.
String msg = "Cannot load configured \"" + CUSTOM_CONTROLLER_CONF_KEY + "\" ("
+ rpcControllerFactoryClazz + ") from hbase-site.xml, falling back to use "
+ "default RpcControllerFactory";
if (LOG.isDebugEnabled()) {
LOG.warn(msg, ex); // if DEBUG enabled, we want the exception, but still log in WARN level
} else {
LOG.warn(msg);
}
return new RpcControllerFactory(configuration);
}
}
}

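With the fallback above, a misconfigured or missing custom controller class no longer breaks plain HBase clients. A hedged example of wiring in a custom factory; the class name is illustrative, not a real implementation:

    Configuration conf = HBaseConfiguration.create();
    conf.set(RpcControllerFactory.CUSTOM_CONTROLLER_CONF_KEY,
        "com.example.MyRpcControllerFactory");          // hypothetical custom factory
    RpcControllerFactory factory = RpcControllerFactory.instantiate(conf);
    PayloadCarryingRpcController controller = factory.newController();
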
@@ -1591,6 +1591,7 @@ public final class ProtobufUtil {
try {
GetResponse response = client.get(null, request);
if (!response.hasResult()) return null;
// We pass 'null' RpcController. So Result will be pure RB.
return toResult(response.getResult());
} catch (ServiceException se) {
throw getRemoteException(se);

@@ -2608,12 +2609,19 @@ public final class ProtobufUtil {

public static CompactionDescriptor toCompactionDescriptor(HRegionInfo info, byte[] family,
List<Path> inputPaths, List<Path> outputPaths, Path storeDir) {
return toCompactionDescriptor(info, null, family, inputPaths, outputPaths, storeDir);
}

@SuppressWarnings("deprecation")
public static CompactionDescriptor toCompactionDescriptor(HRegionInfo info, byte[] regionName,
byte[] family, List<Path> inputPaths, List<Path> outputPaths, Path storeDir) {
// compaction descriptor contains relative paths.
// input / output paths are relative to the store dir
// store dir is relative to region dir
CompactionDescriptor.Builder builder = CompactionDescriptor.newBuilder()
.setTableName(ByteStringer.wrap(info.getTableName()))
.setEncodedRegionName(ByteStringer.wrap(info.getEncodedNameAsBytes()))
.setEncodedRegionName(ByteStringer.wrap(
regionName == null ? info.getEncodedNameAsBytes() : regionName))
.setFamilyName(ByteStringer.wrap(family))
.setStoreHomeDir(storeDir.getName()); //make relative
for (Path inputPath : inputPaths) {

@@ -3210,7 +3218,13 @@ public final class ProtobufUtil {
*/
public static HBaseProtos.VersionInfo getVersionInfo() {
HBaseProtos.VersionInfo.Builder builder = HBaseProtos.VersionInfo.newBuilder();
builder.setVersion(VersionInfo.getVersion());
String version = VersionInfo.getVersion();
builder.setVersion(version);
String[] components = version.split("\\.");
if (components != null && components.length > 2) {
builder.setVersionMajor(Integer.parseInt(components[0]));
builder.setVersionMinor(Integer.parseInt(components[1]));
}
builder.setUrl(VersionInfo.getUrl());
builder.setRevision(VersionInfo.getRevision());
builder.setUser(VersionInfo.getUser());

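The version parsing added above simply splits the dotted version string and keeps the first two numeric components. A small hedged illustration with a made-up version value:

    String version = "1.3.0-SNAPSHOT";
    String[] components = version.split("\\.");    // ["1", "3", "0-SNAPSHOT"]
    if (components != null && components.length > 2) {
      int major = Integer.parseInt(components[0]); // 1
      int minor = Integer.parseInt(components[1]); // 3
    }
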
@@ -529,7 +529,8 @@ public final class RequestConverter {
* @return a scan request
*/
public static ScanRequest buildScanRequest(final long scannerId, final int numberOfRows,
final boolean closeScanner, final long nextCallSeq, final boolean trackMetrics) {
final boolean closeScanner, final long nextCallSeq, final boolean trackMetrics,
final boolean renew) {
ScanRequest.Builder builder = ScanRequest.newBuilder();
builder.setNumberOfRows(numberOfRows);
builder.setCloseScanner(closeScanner);

@@ -538,6 +539,7 @@ public final class RequestConverter {
builder.setClientHandlesPartials(true);
builder.setClientHandlesHeartbeats(true);
builder.setTrackScanMetrics(trackMetrics);
builder.setRenew(renew);
return builder.build();
}

@@ -55,8 +55,8 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
private TableCFsTracker tableCFsTracker;

/**
* Constructor that takes all the objects required to communicate with the
* specified peer, except for the region server addresses.
* Constructor that takes all the objects required to communicate with the specified peer, except
* for the region server addresses.
* @param conf configuration object to this peer
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer

@@ -67,6 +67,22 @@ public class ReplicationPeerZKImpl implements ReplicationPeer, Abortable, Closea
this.peerConfig = peerConfig;
this.id = id;
}

/**
* Constructor that takes all the objects required to communicate with the specified peer, except
* for the region server addresses.
* @param conf configuration object to this peer
* @param id string representation of this peer's identifier
* @param peerConfig configuration for the replication peer
* @param tableCFs table-cf configuration for this peer
*/
public ReplicationPeerZKImpl(Configuration conf, String id, ReplicationPeerConfig peerConfig,
Map<TableName, List<String>> tableCFs) throws ReplicationException {
this.conf = conf;
this.peerConfig = peerConfig;
this.id = id;
this.tableCFs = tableCFs;
}

/**
* start a state tracker to check whether this peer is enabled or not

@@ -50,7 +50,7 @@ public interface ReplicationPeers {
* @param peerId a short that identifies the cluster
* @param peerConfig configuration for the replication slave cluster
* @param tableCFs the table and column-family list which will be replicated for this peer or null
* for all table and column families
* for all table and column families
*/
void addPeer(String peerId, ReplicationPeerConfig peerConfig, String tableCFs)
throws ReplicationException;

@@ -29,6 +29,7 @@ import java.util.concurrent.ConcurrentMap;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;

@@ -46,6 +47,7 @@ import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.hadoop.hbase.zookeeper.ZKUtil.ZKUtilOp;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.KeeperException.NoNodeException;

import com.google.protobuf.ByteString;

@@ -119,8 +121,21 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
}

checkQueuesDeleted(id);

ZKUtil.createWithParents(this.zookeeper, this.peersZNode);

// If only bulk load hfile replication is enabled then add peerId node to hfile-refs node
if (replicationForBulkLoadEnabled) {
try {
String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id);
LOG.info("Adding peer " + peerId + " to hfile reference queue.");
ZKUtil.createWithParents(this.zookeeper, peerId);
} catch (KeeperException e) {
throw new ReplicationException("Failed to add peer with id=" + id
+ ", node under hfile references node.", e);
}
}

List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
ZKUtilOp op1 = ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(this.peersZNode, id),
toByteArray(peerConfig));

@@ -150,6 +165,16 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
+ " because that id does not exist.");
}
ZKUtil.deleteNodeRecursively(this.zookeeper, ZKUtil.joinZNode(this.peersZNode, id));
// Delete peerId node from hfile-refs node irrespective of whether bulk loaded hfile
// replication is enabled or not

String peerId = ZKUtil.joinZNode(this.hfileRefsZNode, id);
try {
LOG.info("Removing peer " + peerId + " from hfile reference queue.");
ZKUtil.deleteNodeRecursively(this.zookeeper, peerId);
} catch (NoNodeException e) {
LOG.info("Did not find node " + peerId + " to delete.", e);
}
} catch (KeeperException e) {
throw new ReplicationException("Could not remove peer with id=" + id, e);
}

@@ -318,11 +343,9 @@ public class ReplicationPeersZKImpl extends ReplicationStateZKBase implements Re
return null;
}

Configuration otherConf = new Configuration(this.conf);
Configuration otherConf;
try {
if (peerConfig.getClusterKey() != null && !peerConfig.getClusterKey().isEmpty()) {
ZKUtil.applyClusterKeyToConf(otherConf, peerConfig.getClusterKey());
}
otherConf = HBaseConfiguration.createClusterConf(this.conf, peerConfig.getClusterKey());
} catch (IOException e) {
LOG.error("Can't get peer configuration for peerId=" + peerId + " because:", e);
return null;

@@ -26,7 +26,8 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;

/**
* This provides an interface for maintaining a region server's replication queues. These queues
* keep track of the WALs that still need to be replicated to remote clusters.
* keep track of the WALs and HFile references (if hbase.replication.bulkload.enabled is enabled)
* that still need to be replicated to remote clusters.
*/
@InterfaceAudience.Private
public interface ReplicationQueues {

@@ -113,4 +114,26 @@ public interface ReplicationQueues {
* @return if this is this rs's znode
*/
boolean isThisOurZnode(String znode);

/**
* Add a peer to hfile reference queue if peer does not exist.
* @param peerId peer cluster id to be added
* @throws ReplicationException if fails to add a peer id to hfile reference queue
*/
void addPeerToHFileRefs(String peerId) throws ReplicationException;

/**
* Add new hfile references to the queue.
* @param peerId peer cluster id to which the hfiles need to be replicated
* @param files list of hfile references to be added
* @throws ReplicationException if fails to add a hfile reference
*/
void addHFileRefs(String peerId, List<String> files) throws ReplicationException;

/**
* Remove hfile references from the queue.
* @param peerId peer cluster id from which this hfile references needs to be removed
* @param files list of hfile references to be removed
*/
void removeHFileRefs(String peerId, List<String> files);
}

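A hedged sketch of how a replication source might use the three new queue methods; the method names and call sites are illustrative, and queues is an already-initialized ReplicationQueues implementation:

    void trackBulkLoadedFiles(ReplicationQueues queues, String peerId, List<String> hfiles)
        throws ReplicationException {
      queues.addPeerToHFileRefs(peerId);       // creates the peer znode only if it is missing
      queues.addHFileRefs(peerId, hfiles);     // enqueue the references to be shipped
    }

    void markShipped(ReplicationQueues queues, String peerId, List<String> hfiles) {
      queues.removeHFileRefs(peerId, hfiles);  // best-effort cleanup; failures are only logged
    }
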
@@ -25,7 +25,8 @@ import org.apache.zookeeper.KeeperException;

/**
* This provides an interface for clients of replication to view replication queues. These queues
* keep track of the WALs that still need to be replicated to remote clusters.
* keep track of the sources(WALs/HFile references) that still need to be replicated to remote
* clusters.
*/
@InterfaceAudience.Private
public interface ReplicationQueuesClient {

@@ -65,4 +66,26 @@ public interface ReplicationQueuesClient {
* @return cversion of replication rs node
*/
int getQueuesZNodeCversion() throws KeeperException;

/**
* Get the change version number of replication hfile references node. This can be used as
* optimistic locking to get a consistent snapshot of the replication queues of hfile references.
* @return change version number of hfile references node
*/
int getHFileRefsNodeChangeVersion() throws KeeperException;

/**
* Get list of all peers from hfile reference queue.
* @return a list of peer ids
* @throws KeeperException zookeeper exception
*/
List<String> getAllPeersFromHFileRefsQueue() throws KeeperException;

/**
* Get a list of all hfile references in the given peer.
* @param peerId a String that identifies the peer
* @return a list of hfile references, null if not found any
* @throws KeeperException zookeeper exception
*/
List<String> getReplicableHFiles(String peerId) throws KeeperException;
}

@@ -84,4 +84,41 @@ public class ReplicationQueuesClientZKImpl extends ReplicationStateZKBase implem
throw e;
}
}

@Override
public int getHFileRefsNodeChangeVersion() throws KeeperException {
Stat stat = new Stat();
try {
ZKUtil.getDataNoWatch(this.zookeeper, this.hfileRefsZNode, stat);
} catch (KeeperException e) {
this.abortable.abort("Failed to get stat of replication hfile references node.", e);
throw e;
}
return stat.getCversion();
}

@Override
public List<String> getAllPeersFromHFileRefsQueue() throws KeeperException {
List<String> result = null;
try {
result = ZKUtil.listChildrenNoWatch(this.zookeeper, this.hfileRefsZNode);
} catch (KeeperException e) {
this.abortable.abort("Failed to get list of all peers in hfile references node.", e);
throw e;
}
return result;
}

@Override
public List<String> getReplicableHFiles(String peerId) throws KeeperException {
String znode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
List<String> result = null;
try {
result = ZKUtil.listChildrenNoWatch(this.zookeeper, znode);
} catch (KeeperException e) {
this.abortable.abort("Failed to get list of hfile references for peerId=" + peerId, e);
throw e;
}
return result;
}
}

@@ -84,6 +84,15 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
} catch (KeeperException e) {
throw new ReplicationException("Could not initialize replication queues.", e);
}
// If only bulk load hfile replication is enabled then create the hfile-refs znode
if (replicationForBulkLoadEnabled) {
try {
ZKUtil.createWithParents(this.zookeeper, this.hfileRefsZNode);
} catch (KeeperException e) {
throw new ReplicationException("Could not initialize hfile references replication queue.",
e);
}
}
}

@Override

@@ -431,4 +440,65 @@ public class ReplicationQueuesZKImpl extends ReplicationStateZKBase implements R
ZooKeeperProtos.ReplicationLock.newBuilder().setLockOwner(lockOwner).build().toByteArray();
return ProtobufUtil.prependPBMagic(bytes);
}

@Override
public void addHFileRefs(String peerId, List<String> files) throws ReplicationException {
String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
boolean debugEnabled = LOG.isDebugEnabled();
if (debugEnabled) {
LOG.debug("Adding hfile references " + files + " in queue " + peerZnode);
}
List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
int size = files.size();
for (int i = 0; i < size; i++) {
listOfOps.add(ZKUtilOp.createAndFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i)),
HConstants.EMPTY_BYTE_ARRAY));
}
if (debugEnabled) {
LOG.debug(" The multi list size for adding hfile references in zk for node " + peerZnode
+ " is " + listOfOps.size());
}
try {
ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true);
} catch (KeeperException e) {
throw new ReplicationException("Failed to create hfile reference znode=" + e.getPath(), e);
}
}

@Override
public void removeHFileRefs(String peerId, List<String> files) {
String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
boolean debugEnabled = LOG.isDebugEnabled();
if (debugEnabled) {
LOG.debug("Removing hfile references " + files + " from queue " + peerZnode);
}
List<ZKUtilOp> listOfOps = new ArrayList<ZKUtil.ZKUtilOp>();
int size = files.size();
for (int i = 0; i < size; i++) {
listOfOps.add(ZKUtilOp.deleteNodeFailSilent(ZKUtil.joinZNode(peerZnode, files.get(i))));
}
if (debugEnabled) {
LOG.debug(" The multi list size for removing hfile references in zk for node " + peerZnode
+ " is " + listOfOps.size());
}
try {
ZKUtil.multiOrSequential(this.zookeeper, listOfOps, true);
} catch (KeeperException e) {
LOG.error("Failed to remove hfile reference znode=" + e.getPath(), e);
}
}

@Override
public void addPeerToHFileRefs(String peerId) throws ReplicationException {
String peerZnode = ZKUtil.joinZNode(this.hfileRefsZNode, peerId);
try {
if (ZKUtil.checkExists(this.zookeeper, peerZnode) == -1) {
LOG.info("Adding peer " + peerId + " to hfile reference queue.");
ZKUtil.createWithParents(this.zookeeper, peerZnode);
}
} catch (KeeperException e) {
throw new ReplicationException("Failed to add peer " + peerId + " to hfile reference queue.",
e);
}
}
}

@@ -20,11 +20,13 @@ package org.apache.hadoop.hbase.replication;

import java.util.List;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.Abortable;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
import org.apache.zookeeper.KeeperException;

@@ -47,32 +49,43 @@ public abstract class ReplicationStateZKBase {
protected final String peersZNode;
/** The name of the znode that contains all replication queues */
protected final String queuesZNode;
/** The name of the znode that contains queues of hfile references to be replicated */
protected final String hfileRefsZNode;
/** The cluster key of the local cluster */
protected final String ourClusterKey;
protected final ZooKeeperWatcher zookeeper;
protected final Configuration conf;
protected final Abortable abortable;
protected final boolean replicationForBulkLoadEnabled;

// Public for testing
public static final byte[] ENABLED_ZNODE_BYTES =
toByteArray(ZooKeeperProtos.ReplicationState.State.ENABLED);
public static final byte[] DISABLED_ZNODE_BYTES =
toByteArray(ZooKeeperProtos.ReplicationState.State.DISABLED);
public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY =
"zookeeper.znode.replication.hfile.refs";
public static final String ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT = "hfile-refs";

public ReplicationStateZKBase(ZooKeeperWatcher zookeeper, Configuration conf,
Abortable abortable) {
this.zookeeper = zookeeper;
this.conf = conf;
this.abortable = abortable;
this.replicationForBulkLoadEnabled = conf.getBoolean(HConstants.REPLICATION_BULKLOAD_ENABLE_KEY,
HConstants.REPLICATION_BULKLOAD_ENABLE_DEFAULT);

String replicationZNodeName = conf.get("zookeeper.znode.replication", "replication");
String peersZNodeName = conf.get("zookeeper.znode.replication.peers", "peers");
String queuesZNodeName = conf.get("zookeeper.znode.replication.rs", "rs");
String hfileRefsZNodeName = conf.get(ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY,
ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT);
this.peerStateNodeName = conf.get("zookeeper.znode.replication.peers.state", "peer-state");
this.ourClusterKey = ZKUtil.getZooKeeperClusterKey(this.conf);
this.ourClusterKey = ZKConfig.getZooKeeperClusterKey(this.conf);
this.replicationZNode = ZKUtil.joinZNode(this.zookeeper.baseZNode, replicationZNodeName);
this.peersZNode = ZKUtil.joinZNode(replicationZNode, peersZNodeName);
this.queuesZNode = ZKUtil.joinZNode(replicationZNode, queuesZNodeName);
this.hfileRefsZNode = ZKUtil.joinZNode(replicationZNode, hfileRefsZNodeName);
}

public List<String> getListOfReplicators() {

@@ -49,6 +49,7 @@ import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos;
import org.apache.hadoop.hbase.protobuf.generated.ClusterStatusProtos.RegionStoreSequenceIds;
import org.apache.hadoop.hbase.protobuf.generated.ZooKeeperProtos;
import org.apache.hadoop.hbase.replication.ReplicationStateZKBase;
import org.apache.hadoop.hbase.security.Superusers;
import org.apache.hadoop.hbase.util.ByteStringer;
import org.apache.hadoop.hbase.util.Bytes;

@@ -76,7 +77,6 @@ import org.apache.zookeeper.proto.DeleteRequest;
import org.apache.zookeeper.proto.SetDataRequest;
import org.apache.zookeeper.server.ZooKeeperSaslServer;

import com.google.common.annotations.VisibleForTesting;
import com.google.protobuf.InvalidProtocolBufferException;

/**

@@ -96,25 +96,6 @@ public class ZKUtil {
public static final char ZNODE_PATH_SEPARATOR = '/';
private static int zkDumpConnectionTimeOut;

// The Quorum for the ZK cluster can have one of the following formats (see examples below):
// (1). s1,s2,s3 (no client port in the list, the client port could be obtained from clientPort)
// (2). s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server,
// in this case, the clientPort would be ignored)
// (3). s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use
// the clientPort; otherwise, it would use the specified port)
@VisibleForTesting
public static class ZKClusterKey {
public String quorumString;
public int clientPort;
public String znodeParent;

ZKClusterKey(String quorumString, int clientPort, String znodeParent) {
this.quorumString = quorumString;
this.clientPort = clientPort;
this.znodeParent = znodeParent;
}
}

/**
* Creates a new connection to ZooKeeper, pulling settings and ensemble config
* from the specified configuration object using methods from {@link ZKConfig}.

@@ -365,110 +346,6 @@ public class ZKUtil {
return path.substring(path.lastIndexOf("/")+1);
}

/**
* Get the key to the ZK ensemble for this configuration without
* adding a name at the end
* @param conf Configuration to use to build the key
* @return ensemble key without a name
*/
public static String getZooKeeperClusterKey(Configuration conf) {
return getZooKeeperClusterKey(conf, null);
}

/**
* Get the key to the ZK ensemble for this configuration and append
* a name at the end
* @param conf Configuration to use to build the key
* @param name Name that should be appended at the end if not empty or null
* @return ensemble key with a name (if any)
*/
public static String getZooKeeperClusterKey(Configuration conf, String name) {
String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM).replaceAll(
"[\\t\\n\\x0B\\f\\r]", "");
StringBuilder builder = new StringBuilder(ensemble);
builder.append(":");
builder.append(conf.get(HConstants.ZOOKEEPER_CLIENT_PORT));
builder.append(":");
builder.append(conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
if (name != null && !name.isEmpty()) {
builder.append(",");
builder.append(name);
}
return builder.toString();
}

/**
* Apply the settings in the given key to the given configuration, this is
* used to communicate with distant clusters
* @param conf configuration object to configure
* @param key string that contains the 3 required configurations
* @throws IOException
*/
public static void applyClusterKeyToConf(Configuration conf, String key)
throws IOException{
ZKClusterKey zkClusterKey = transformClusterKey(key);
conf.set(HConstants.ZOOKEEPER_QUORUM, zkClusterKey.quorumString);
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClusterKey.clientPort);
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkClusterKey.znodeParent);
}

/**
* Separate the given key into the three configurations it should contain:
* hbase.zookeeper.quorum, hbase.zookeeper.client.port
* and zookeeper.znode.parent
* @param key
* @return the three configurations in the described order
* @throws IOException
*/
public static ZKClusterKey transformClusterKey(String key) throws IOException {
String[] parts = key.split(":");

if (parts.length == 3) {
return new ZKClusterKey(parts[0], Integer.parseInt(parts[1]), parts[2]);
}

if (parts.length > 3) {
// The quorum could contain client port in server:clientport format, try to transform more.
String zNodeParent = parts[parts.length - 1];
String clientPort = parts[parts.length - 2];

// The first part length is the total length minus the lengths of other parts and minus 2 ":"
int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2;
String quorumStringInput = key.substring(0, endQuorumIndex);
String[] serverHosts = quorumStringInput.split(",");

// The common case is that every server has its own client port specified - this means
// that (total parts - the ZNodeParent part - the ClientPort part) is equal to
// (the number of "," + 1) - "+ 1" because the last server has no ",".
if ((parts.length - 2) == (serverHosts.length + 1)) {
return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent);
}

// For the uncommon case that some servers has no port specified, we need to build the
// server:clientport list using default client port for servers without specified port.
return new ZKClusterKey(
ZKConfig.buildQuorumServerString(serverHosts, clientPort),
Integer.parseInt(clientPort),
zNodeParent);
}

throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" +
HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":"
+ HConstants.ZOOKEEPER_ZNODE_PARENT);
}

/**
* Standardize the ZK quorum string: make it a "server:clientport" list, separated by ','
* @param quorumStringInput a string contains a list of servers for ZK quorum
* @param clientPort the default client port
* @return the string for a list of "server:port" separated by ","
*/
@VisibleForTesting
public static String standardizeQuorumServerString(String quorumStringInput, String clientPort) {
String[] serverHosts = quorumStringInput.split(",");
return ZKConfig.buildQuorumServerString(serverHosts, clientPort);
}

//
// Existence checks and watches
//

@@ -1010,7 +887,7 @@ public class ZKUtil {
JaasConfiguration.SERVER_KEYTAB_KERBEROS_CONFIG_NAME) == null
&& conf.get(HConstants.ZK_CLIENT_KERBEROS_PRINCIPAL) == null
&& conf.get(HConstants.ZK_SERVER_KERBEROS_PRINCIPAL) == null) {

return false;
}
} catch(Exception e) {

@@ -1923,6 +1800,27 @@ public class ZKUtil {
} else if (child.equals(zkw.getConfiguration().
get("zookeeper.znode.replication.rs", "rs"))) {
appendRSZnodes(zkw, znode, sb);
} else if (child.equals(zkw.getConfiguration().get(
ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_KEY,
ReplicationStateZKBase.ZOOKEEPER_ZNODE_REPLICATION_HFILE_REFS_DEFAULT))) {
appendHFileRefsZnodes(zkw, znode, sb);
}
}
}

private static void appendHFileRefsZnodes(ZooKeeperWatcher zkw, String hfileRefsZnode,
StringBuilder sb) throws KeeperException {
sb.append("\n").append(hfileRefsZnode).append(": ");
for (String peerIdZnode : ZKUtil.listChildrenNoWatch(zkw, hfileRefsZnode)) {
String znodeToProcess = ZKUtil.joinZNode(hfileRefsZnode, peerIdZnode);
sb.append("\n").append(znodeToProcess).append(": ");
List<String> peerHFileRefsZnodes = ZKUtil.listChildrenNoWatch(zkw, znodeToProcess);
int size = peerHFileRefsZnodes.size();
for (int i = 0; i < size; i++) {
sb.append(peerHFileRefsZnodes.get(i));
if (i != size - 1) {
sb.append(", ");
}
}
}
}

@@ -40,17 +40,6 @@ import org.junit.experimental.categories.Category;
@Category({SmallTests.class})
public class TestZKUtil {

@Test
public void testGetZooKeeperClusterKey() {
Configuration conf = HBaseConfiguration.create();
conf.set(HConstants.ZOOKEEPER_QUORUM, "\tlocalhost\n");
conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "3333");
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "hbase");
String clusterKey = ZKUtil.getZooKeeperClusterKey(conf, "test");
Assert.assertTrue(!clusterKey.contains("\t") && !clusterKey.contains("\n"));
Assert.assertEquals("localhost:3333:hbase,test", clusterKey);
}

@Test
public void testCreateACL() throws ZooKeeperConnectionException, IOException {
Configuration conf = HBaseConfiguration.create();

@@ -317,7 +317,7 @@ public class ChoreService implements ChoreServicer {
* in the middle of execution will be interrupted and shutdown. This service will be unusable
* after this method has been called (i.e. future scheduling attempts will fail).
*/
public void shutdown() {
public synchronized void shutdown() {
scheduler.shutdownNow();
if (LOG.isInfoEnabled()) {
LOG.info("Chore service for: " + coreThreadPoolPrefix + " had " + scheduledChores.keySet()

@@ -20,15 +20,16 @@ package org.apache.hadoop.hbase;
import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.util.Map.Entry;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.classification.InterfaceStability;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;
import org.apache.hadoop.hbase.util.VersionInfo;
import org.apache.hadoop.hbase.zookeeper.ZKConfig;

/**
* Adds HBase configuration files to a Configuration

@@ -113,7 +114,7 @@ public class HBaseConfiguration extends Configuration {
* @param srcConf the source configuration
**/
public static void merge(Configuration destConf, Configuration srcConf) {
for (Entry<String, String> e : srcConf) {
for (Map.Entry<String, String> e : srcConf) {
destConf.set(e.getKey(), e.getValue());
}
}

@@ -127,7 +128,7 @@ public class HBaseConfiguration extends Configuration {
*/
public static Configuration subset(Configuration srcConf, String prefix) {
Configuration newConf = new Configuration(false);
for (Entry<String, String> entry : srcConf) {
for (Map.Entry<String, String> entry : srcConf) {
if (entry.getKey().startsWith(prefix)) {
String newKey = entry.getKey().substring(prefix.length());
// avoid entries that would produce an empty key

@@ -139,6 +140,18 @@ public class HBaseConfiguration extends Configuration {
return newConf;
}

/**
* Sets all the entries in the provided {@code Map<String, String>} as properties in the
* given {@code Configuration}. Each property will have the specified prefix prepended,
* so that the configuration entries are keyed by {@code prefix + entry.getKey()}.
*/
public static void setWithPrefix(Configuration conf, String prefix,
Iterable<Map.Entry<String, String>> properties) {
for (Map.Entry<String, String> entry : properties) {
conf.set(prefix + entry.getKey(), entry.getValue());
}
}

/**
* @return whether to show HBase Configuration in servlet
*/

@@ -233,7 +246,67 @@ public class HBaseConfiguration extends Configuration {
return passwd;
}

/** For debugging. Dump configurations to system output as xml format.
/**
* Generates a {@link Configuration} instance by applying the ZooKeeper cluster key
* to the base Configuration. Note that additional configuration properties may be needed
* for a remote cluster, so it is preferable to use
* {@link #createClusterConf(Configuration, String, String)}.
*
* @param baseConf the base configuration to use, containing prefixed override properties
* @param clusterKey the ZooKeeper quorum cluster key to apply, or {@code null} if none
*
* @return the merged configuration with override properties and cluster key applied
*
* @see #createClusterConf(Configuration, String, String)
*/
public static Configuration createClusterConf(Configuration baseConf, String clusterKey)
throws IOException {
return createClusterConf(baseConf, clusterKey, null);
}

/**
* Generates a {@link Configuration} instance by applying property overrides prefixed by
* a cluster profile key to the base Configuration. Override properties are extracted by
* the {@link #subset(Configuration, String)} method, then merged on top of the base
* Configuration and returned.
*
* @param baseConf the base configuration to use, containing prefixed override properties
* @param clusterKey the ZooKeeper quorum cluster key to apply, or {@code null} if none
* @param overridePrefix the property key prefix to match for override properties,
* or {@code null} if none
* @return the merged configuration with override properties and cluster key applied
*/
public static Configuration createClusterConf(Configuration baseConf, String clusterKey,
String overridePrefix) throws IOException {
Configuration clusterConf = HBaseConfiguration.create(baseConf);
if (clusterKey != null && !clusterKey.isEmpty()) {
applyClusterKeyToConf(clusterConf, clusterKey);
}

if (overridePrefix != null && !overridePrefix.isEmpty()) {
Configuration clusterSubset = HBaseConfiguration.subset(clusterConf, overridePrefix);
HBaseConfiguration.merge(clusterConf, clusterSubset);
}
return clusterConf;
}

/**
* Apply the settings in the given key to the given configuration, this is
* used to communicate with distant clusters
* @param conf configuration object to configure
* @param key string that contains the 3 required configurations
* @throws IOException
*/
private static void applyClusterKeyToConf(Configuration conf, String key)
throws IOException{
ZKConfig.ZKClusterKey zkClusterKey = ZKConfig.transformClusterKey(key);
conf.set(HConstants.ZOOKEEPER_QUORUM, zkClusterKey.getQuorumString());
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClusterKey.getClientPort());
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, zkClusterKey.getZnodeParent());
}

/**
* For debugging. Dump configurations to system output as xml format.
* Master and RS configurations can also be dumped using
* http services. e.g. "curl http://master:16010/dump"
*/

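A hedged usage example of the new helper: build a configuration for a remote peer from its cluster key and apply any locally stored, prefix-scoped overrides. The host names and the override are illustrative, and createClusterConf throws IOException for a malformed cluster key:

    Configuration local = HBaseConfiguration.create();
    local.set("peer1.hbase.client.retries.number", "3");   // override applied only to the peer conf
    Configuration peerConf = HBaseConfiguration.createClusterConf(
        local, "zk1.example.com,zk2.example.com:2181:/hbase", "peer1.");
    // peerConf now points at the remote quorum and carries hbase.client.retries.number=3
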
@@ -835,6 +835,18 @@ public final class HConstants {
REPLICATION_SINK_SERVICE_CLASSNAME = "hbase.replication.sink.service";
public static final String REPLICATION_SERVICE_CLASSNAME_DEFAULT =
"org.apache.hadoop.hbase.replication.regionserver.Replication";
public static final String REPLICATION_BULKLOAD_ENABLE_KEY = "hbase.replication.bulkload.enabled";
public static final boolean REPLICATION_BULKLOAD_ENABLE_DEFAULT = false;
/** Replication cluster id of source cluster which uniquely identifies itself with peer cluster */
public static final String REPLICATION_CLUSTER_ID = "hbase.replication.cluster.id";
/**
* Directory where the source cluster file system client configurations are placed, which is used
* by the sink cluster to copy HFiles from the source cluster file system
*/
public static final String REPLICATION_CONF_DIR = "hbase.replication.conf.dir";

/** Maximum time to retry for a failed bulk load request */
public static final String BULKLOAD_MAX_RETRIES_NUMBER = "hbase.bulkload.retries.number";

/** HBCK special code name used as server name when manipulating ZK nodes */
public static final String HBCK_CODE_NAME = "HBCKServerName";

@@ -1231,7 +1243,7 @@ public final class HConstants {

public static final String HBASE_CANARY_WRITE_TABLE_CHECK_PERIOD_KEY =
"hbase.canary.write.table.check.period";

/**
* Configuration keys for programmatic JAAS configuration for secured ZK interaction
*/

@@ -1240,7 +1252,7 @@ public final class HConstants {
"hbase.zookeeper.client.kerberos.principal";
public static final String ZK_SERVER_KEYTAB_FILE = "hbase.zookeeper.server.keytab.file";
public static final String ZK_SERVER_KERBEROS_PRINCIPAL =
"hbase.zookeeper.server.kerberos.principal";
"hbase.zookeeper.server.kerberos.principal";

private HConstants() {
// Can't be instantiated with this ctor.

@ -228,7 +228,7 @@ public abstract class ScheduledChore implements Runnable {
|
|||
&& getTimeBetweenRuns() > getMaximumAllowedTimeBetweenRuns();
|
||||
}
|
||||
|
||||
private synchronized double getMaximumAllowedTimeBetweenRuns() {
|
||||
private double getMaximumAllowedTimeBetweenRuns() {
|
||||
// Threshold used to determine if the Chore's current run started too late
|
||||
return 1.5 * period;
|
||||
}
|
||||
|
@ -268,23 +268,23 @@ public abstract class ScheduledChore implements Runnable {
|
|||
choreServicer = null;
|
||||
}
|
||||
|
||||
public synchronized String getName() {
|
||||
public String getName() {
|
||||
return name;
|
||||
}
|
||||
|
||||
public synchronized Stoppable getStopper() {
|
||||
public Stoppable getStopper() {
|
||||
return stopper;
|
||||
}
|
||||
|
||||
public synchronized int getPeriod() {
|
||||
public int getPeriod() {
|
||||
return period;
|
||||
}
|
||||
|
||||
public synchronized long getInitialDelay() {
|
||||
public long getInitialDelay() {
|
||||
return initialDelay;
|
||||
}
|
||||
|
||||
public final synchronized TimeUnit getTimeUnit() {
|
||||
public TimeUnit getTimeUnit() {
|
||||
return timeUnit;
|
||||
}
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ public class HFileContextBuilder {
|
|||
/** Whether mvcc is to be included in the Read/Write **/
|
||||
private boolean includesMvcc = true;
|
||||
/** Whether tags are to be included in the Read/Write **/
|
||||
private boolean includesTags;
|
||||
private boolean includesTags = false;
|
||||
/** Compression algorithm used **/
|
||||
private Algorithm compression = Algorithm.NONE;
|
||||
/** Whether tags to be compressed or not **/
|
||||
|
|
|
@ -604,7 +604,7 @@ public class Bytes {
|
|||
if (length != SIZEOF_LONG || offset + length > bytes.length) {
|
||||
throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_LONG);
|
||||
}
|
||||
if (UnsafeComparer.isAvailable()) {
|
||||
if (UnsafeComparer.unaligned()) {
|
||||
return toLongUnsafe(bytes, offset);
|
||||
} else {
|
||||
long l = 0;
|
||||
|
@ -645,7 +645,7 @@ public class Bytes {
|
|||
throw new IllegalArgumentException("Not enough room to put a long at"
|
||||
+ " offset " + offset + " in a " + bytes.length + " byte array");
|
||||
}
|
||||
if (UnsafeComparer.isAvailable()) {
|
||||
if (UnsafeComparer.unaligned()) {
|
||||
return putLongUnsafe(bytes, offset, val);
|
||||
} else {
|
||||
for(int i = offset + 7; i > offset; i--) {
|
||||
|
@ -800,7 +800,7 @@ public class Bytes {
|
|||
if (length != SIZEOF_INT || offset + length > bytes.length) {
|
||||
throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_INT);
|
||||
}
|
||||
if (UnsafeComparer.isAvailable()) {
|
||||
if (UnsafeComparer.unaligned()) {
|
||||
return toIntUnsafe(bytes, offset);
|
||||
} else {
|
||||
int n = 0;
|
||||
|
@ -896,7 +896,7 @@ public class Bytes {
|
|||
throw new IllegalArgumentException("Not enough room to put an int at"
|
||||
+ " offset " + offset + " in a " + bytes.length + " byte array");
|
||||
}
|
||||
if (UnsafeComparer.isAvailable()) {
|
||||
if (UnsafeComparer.unaligned()) {
|
||||
return putIntUnsafe(bytes, offset, val);
|
||||
} else {
|
||||
for(int i= offset + 3; i > offset; i--) {
|
||||
|
@ -970,7 +970,7 @@ public class Bytes {
|
|||
if (length != SIZEOF_SHORT || offset + length > bytes.length) {
|
||||
throw explainWrongLengthOrOffset(bytes, offset, length, SIZEOF_SHORT);
|
||||
}
|
||||
if (UnsafeComparer.isAvailable()) {
|
||||
if (UnsafeComparer.unaligned()) {
|
||||
return toShortUnsafe(bytes, offset);
|
||||
} else {
|
||||
short n = 0;
|
||||
|
@ -1008,7 +1008,7 @@ public class Bytes {
|
|||
throw new IllegalArgumentException("Not enough room to put a short at"
|
||||
+ " offset " + offset + " in a " + bytes.length + " byte array");
|
||||
}
|
||||
if (UnsafeComparer.isAvailable()) {
|
||||
if (UnsafeComparer.unaligned()) {
|
||||
return putShortUnsafe(bytes, offset, val);
|
||||
} else {
|
||||
bytes[offset+1] = (byte) val;
|
||||
|
@ -1315,28 +1315,19 @@ public class Bytes {
|
|||
INSTANCE;
|
||||
|
||||
static final Unsafe theUnsafe;
|
||||
private static boolean unaligned = false;
|
||||
|
||||
/** The offset to the first element in a byte array. */
|
||||
static final int BYTE_ARRAY_BASE_OFFSET;
|
||||
|
||||
static {
|
||||
theUnsafe = (Unsafe) AccessController.doPrivileged(
|
||||
new PrivilegedAction<Object>() {
|
||||
@Override
|
||||
public Object run() {
|
||||
try {
|
||||
Field f = Unsafe.class.getDeclaredField("theUnsafe");
|
||||
f.setAccessible(true);
|
||||
return f.get(null);
|
||||
} catch (NoSuchFieldException e) {
|
||||
// It doesn't matter what we throw;
|
||||
// it's swallowed in getBestComparer().
|
||||
throw new Error();
|
||||
} catch (IllegalAccessException e) {
|
||||
throw new Error();
|
||||
}
|
||||
}
|
||||
});
|
||||
if (UnsafeAccess.unaligned()) {
|
||||
theUnsafe = UnsafeAccess.theUnsafe;
|
||||
} else {
|
||||
// It doesn't matter what we throw;
|
||||
// it's swallowed in getBestComparer().
|
||||
throw new Error();
|
||||
}
|
||||
|
||||
BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);
|
||||
|
||||
|
@ -1344,6 +1335,7 @@ public class Bytes {
|
|||
if (theUnsafe.arrayIndexScale(byte[].class) != 1) {
|
||||
throw new AssertionError();
|
||||
}
|
||||
unaligned = UnsafeAccess.unaligned();
|
||||
}
|
||||
|
||||
static final boolean littleEndian =
|
||||
|
@ -1403,6 +1395,14 @@ public class Bytes {
|
|||
return theUnsafe != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true when the running JVM has sun's Unsafe package available and the underlying
*         system supports unaligned memory access.
|
||||
*/
|
||||
public static boolean unaligned() {
|
||||
return unaligned;
|
||||
}
|
||||
|
||||
/**
|
||||
* Lexicographically compare two arrays.
|
||||
*
|
||||
|
|
|
@ -18,6 +18,7 @@
|
|||
package org.apache.hadoop.hbase.util;
|
||||
|
||||
import java.lang.reflect.Field;
|
||||
import java.lang.reflect.Method;
|
||||
import java.nio.ByteOrder;
|
||||
import java.security.AccessController;
|
||||
import java.security.PrivilegedAction;
|
||||
|
@ -39,6 +40,7 @@ public final class UnsafeAccess {
|
|||
|
||||
/** The offset to the first element in a byte array. */
|
||||
public static final int BYTE_ARRAY_BASE_OFFSET;
|
||||
private static boolean unaligned = false;
|
||||
|
||||
static {
|
||||
theUnsafe = (Unsafe) AccessController.doPrivileged(new PrivilegedAction<Object>() {
|
||||
|
@ -57,6 +59,15 @@ public final class UnsafeAccess {
|
|||
|
||||
if(theUnsafe != null){
|
||||
BYTE_ARRAY_BASE_OFFSET = theUnsafe.arrayBaseOffset(byte[].class);
|
||||
try {
|
||||
// Using java.nio.Bits#unaligned() to check for unaligned-access capability
|
||||
Class<?> clazz = Class.forName("java.nio.Bits");
|
||||
Method m = clazz.getDeclaredMethod("unaligned");
|
||||
m.setAccessible(true);
|
||||
unaligned = (boolean) m.invoke(null);
|
||||
} catch (Exception e) {
|
||||
unaligned = false;
|
||||
}
|
||||
} else{
|
||||
BYTE_ARRAY_BASE_OFFSET = -1;
|
||||
}
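For reference, the reflection probe above can be exercised in isolation; a minimal sketch assuming a JDK where java.nio.Bits#unaligned() still exists (the class name UnalignedProbe is made up):

import java.lang.reflect.Method;

public class UnalignedProbe {
  public static void main(String[] args) {
    boolean unaligned;
    try {
      // Same probe as above: java.nio.Bits#unaligned() reports unaligned-access capability.
      Class<?> bits = Class.forName("java.nio.Bits");
      Method m = bits.getDeclaredMethod("unaligned");
      m.setAccessible(true);
      unaligned = (boolean) m.invoke(null);
    } catch (Exception e) {
      unaligned = false; // be conservative if the probe is unavailable
    }
    System.out.println("unaligned access supported: " + unaligned);
  }
}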
|
||||
|
@ -68,6 +79,14 @@ public final class UnsafeAccess {
|
|||
return theUnsafe != null;
|
||||
}
|
||||
|
||||
/**
|
||||
* @return true when the running JVM has sun's Unsafe package available and the underlying
*         system supports unaligned memory access.
|
||||
*/
|
||||
public static boolean unaligned() {
|
||||
return unaligned;
|
||||
}
|
||||
|
||||
public static final boolean littleEndian = ByteOrder.nativeOrder()
|
||||
.equals(ByteOrder.LITTLE_ENDIAN);
|
||||
}
|
||||
|
|
|
@ -25,11 +25,12 @@ import java.util.List;
|
|||
import java.util.Map.Entry;
|
||||
import java.util.Properties;
|
||||
|
||||
import com.google.common.annotations.VisibleForTesting;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
|
||||
/**
|
||||
* Utility methods for reading and building the ZooKeeper configuration.
|
||||
|
@ -40,7 +41,7 @@ import org.apache.hadoop.hbase.HConstants;
|
|||
* (3). other zookeeper related properties in HBASE XML
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class ZKConfig {
|
||||
public final class ZKConfig {
|
||||
private static final Log LOG = LogFactory.getLog(ZKConfig.class);
|
||||
|
||||
private static final String VARIABLE_START = "${";
|
||||
|
@ -48,6 +49,9 @@ public class ZKConfig {
|
|||
private static final String VARIABLE_END = "}";
|
||||
private static final int VARIABLE_END_LENGTH = VARIABLE_END.length();
|
||||
|
||||
private ZKConfig() {
|
||||
}
|
||||
|
||||
/**
|
||||
* Make a Properties object holding ZooKeeper config.
|
||||
* Parses the corresponding config options from the HBase XML configs
|
||||
|
@ -85,7 +89,7 @@ public class ZKConfig {
|
|||
"' to false");
|
||||
// First check if there is a zoo.cfg in the CLASSPATH. If so, simply read
|
||||
// it and grab its configuration properties.
|
||||
ClassLoader cl = HQuorumPeer.class.getClassLoader();
|
||||
ClassLoader cl = ZKConfig.class.getClassLoader();
|
||||
final InputStream inputStream =
|
||||
cl.getResourceAsStream(HConstants.ZOOKEEPER_CONFIG_NAME);
|
||||
if (inputStream != null) {
|
||||
|
@ -305,31 +309,7 @@ public class ZKConfig {
|
|||
// Build the ZK quorum server string with "server:clientport" list, separated by ','
|
||||
final String[] serverHosts =
|
||||
conf.getStrings(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
|
||||
return buildQuorumServerString(serverHosts, defaultClientPort);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the ZK quorum server string with "server:clientport" list, separated by ','
|
||||
*
|
||||
* @param serverHosts a list of servers for ZK quorum
|
||||
* @param clientPort the default client port
|
||||
* @return the string for a list of "server:port" separated by ","
|
||||
*/
|
||||
public static String buildQuorumServerString(String[] serverHosts, String clientPort) {
|
||||
StringBuilder quorumStringBuilder = new StringBuilder();
|
||||
String serverHost;
|
||||
for (int i = 0; i < serverHosts.length; ++i) {
|
||||
if (serverHosts[i].contains(":")) {
|
||||
serverHost = serverHosts[i]; // just use the port specified from the input
|
||||
} else {
|
||||
serverHost = serverHosts[i] + ":" + clientPort;
|
||||
}
|
||||
if (i > 0) {
|
||||
quorumStringBuilder.append(',');
|
||||
}
|
||||
quorumStringBuilder.append(serverHost);
|
||||
}
|
||||
return quorumStringBuilder.toString();
|
||||
return buildZKQuorumServerString(serverHosts, defaultClientPort);
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -347,4 +327,169 @@ public class ZKConfig {
|
|||
|
||||
return getZKQuorumServersStringFromHbaseConfig(conf);
|
||||
}
|
||||
|
||||
/**
|
||||
* Build the ZK quorum server string with "server:clientport" list, separated by ','
|
||||
*
|
||||
* @param serverHosts a list of servers for ZK quorum
|
||||
* @param clientPort the default client port
|
||||
* @return the string for a list of "server:port" separated by ","
|
||||
*/
|
||||
public static String buildZKQuorumServerString(String[] serverHosts, String clientPort) {
|
||||
StringBuilder quorumStringBuilder = new StringBuilder();
|
||||
String serverHost;
|
||||
for (int i = 0; i < serverHosts.length; ++i) {
|
||||
if (serverHosts[i].contains(":")) {
|
||||
serverHost = serverHosts[i]; // just use the port specified from the input
|
||||
} else {
|
||||
serverHost = serverHosts[i] + ":" + clientPort;
|
||||
}
|
||||
if (i > 0) {
|
||||
quorumStringBuilder.append(',');
|
||||
}
|
||||
quorumStringBuilder.append(serverHost);
|
||||
}
|
||||
return quorumStringBuilder.toString();
|
||||
}
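A small usage sketch (hosts and ports are made up): hosts that already carry a port keep it, the rest get the supplied default.

// Expected to produce "s1:2181,s2:2182,s3:2181".
String quorum = ZKConfig.buildZKQuorumServerString(
    new String[] { "s1", "s2:2182", "s3" }, "2181");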
|
||||
|
||||
/**
|
||||
* Verifies that the given key matches the expected format for a ZooKeeper cluster key.
|
||||
* The Quorum for the ZK cluster can have one of the following formats (see examples below):
|
||||
*
|
||||
* <ol>
|
||||
* <li>s1,s2,s3 (no client port in the list, the client port could be obtained from
|
||||
* clientPort)</li>
|
||||
* <li>s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server,
|
||||
* in this case, the clientPort would be ignored)</li>
|
||||
* <li>s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use
|
||||
* the clientPort; otherwise, it would use the specified port)</li>
|
||||
* </ol>
|
||||
*
|
||||
* @param key the cluster key to validate
|
||||
* @throws IOException if the key could not be parsed
|
||||
*/
|
||||
public static void validateClusterKey(String key) throws IOException {
|
||||
transformClusterKey(key);
|
||||
}
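A hedged illustration of the three formats above (server names are made up; the calls must sit in a method that can throw IOException):

ZKConfig.ZKClusterKey k1 = ZKConfig.transformClusterKey("s1,s2,s3:2181:/hbase");
ZKConfig.ZKClusterKey k2 = ZKConfig.transformClusterKey("s1:2181,s2:2181,s3:2181:2181:/hbase");
ZKConfig.ZKClusterKey k3 = ZKConfig.transformClusterKey("s1:2181,s2,s3:2181:2181:/hbase");
// All three report client port 2181 and parent znode "/hbase"; k2 and k3 keep explicit
// per-server ports in their quorum strings (k3 fills in the default 2181 for s2).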
|
||||
|
||||
/**
|
||||
* Separate the given key into the three configurations it should contain:
|
||||
* hbase.zookeeper.quorum, hbase.zookeeper.client.port
|
||||
* and zookeeper.znode.parent
|
||||
* @param key the cluster key to parse
* @return the three configurations in the described order
* @throws IOException if the key could not be parsed
|
||||
*/
|
||||
public static ZKClusterKey transformClusterKey(String key) throws IOException {
|
||||
String[] parts = key.split(":");
|
||||
|
||||
if (parts.length == 3) {
|
||||
return new ZKClusterKey(parts [0], Integer.parseInt(parts [1]), parts [2]);
|
||||
}
|
||||
|
||||
if (parts.length > 3) {
|
||||
// The quorum could contain client port in server:clientport format, try to transform more.
|
||||
String zNodeParent = parts [parts.length - 1];
|
||||
String clientPort = parts [parts.length - 2];
|
||||
|
||||
// The quorum part's length is the total length minus the lengths of the other parts and the 2 ":" separators
|
||||
int endQuorumIndex = key.length() - zNodeParent.length() - clientPort.length() - 2;
|
||||
String quorumStringInput = key.substring(0, endQuorumIndex);
|
||||
String[] serverHosts = quorumStringInput.split(",");
|
||||
|
||||
// The common case is that every server has its own client port specified - this means
|
||||
// that (total parts - the ZNodeParent part - the ClientPort part) is equal to
|
||||
// (the number of "," + 1) - "+ 1" because the last server has no ",".
|
||||
if ((parts.length - 2) == (serverHosts.length + 1)) {
|
||||
return new ZKClusterKey(quorumStringInput, Integer.parseInt(clientPort), zNodeParent);
|
||||
}
|
||||
|
||||
// For the uncommon case that some servers has no port specified, we need to build the
|
||||
// server:clientport list using default client port for servers without specified port.
|
||||
return new ZKClusterKey(
|
||||
buildZKQuorumServerString(serverHosts, clientPort),
|
||||
Integer.parseInt(clientPort),
|
||||
zNodeParent);
|
||||
}
|
||||
|
||||
throw new IOException("Cluster key passed " + key + " is invalid, the format should be:" +
|
||||
HConstants.ZOOKEEPER_QUORUM + ":" + HConstants.ZOOKEEPER_CLIENT_PORT + ":"
|
||||
+ HConstants.ZOOKEEPER_ZNODE_PARENT);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the key to the ZK ensemble for this configuration without
|
||||
* adding a name at the end
|
||||
* @param conf Configuration to use to build the key
|
||||
* @return ensemble key without a name
|
||||
*/
|
||||
public static String getZooKeeperClusterKey(Configuration conf) {
|
||||
return getZooKeeperClusterKey(conf, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the key to the ZK ensemble for this configuration and append
|
||||
* a name at the end
|
||||
* @param conf Configuration to use to build the key
|
||||
* @param name Name that should be appended at the end if not empty or null
|
||||
* @return ensemble key with a name (if any)
|
||||
*/
|
||||
public static String getZooKeeperClusterKey(Configuration conf, String name) {
|
||||
String ensemble = conf.get(HConstants.ZOOKEEPER_QUORUM).replaceAll(
|
||||
"[\\t\\n\\x0B\\f\\r]", "");
|
||||
StringBuilder builder = new StringBuilder(ensemble);
|
||||
builder.append(":");
|
||||
builder.append(conf.get(HConstants.ZOOKEEPER_CLIENT_PORT));
|
||||
builder.append(":");
|
||||
builder.append(conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
|
||||
if (name != null && !name.isEmpty()) {
|
||||
builder.append(",");
|
||||
builder.append(name);
|
||||
}
|
||||
return builder.toString();
|
||||
}
|
||||
|
||||
/**
|
||||
* Standardize the ZK quorum string: make it a "server:clientport" list, separated by ','
|
||||
* @param quorumStringInput a string contains a list of servers for ZK quorum
|
||||
* @param clientPort the default client port
|
||||
* @return the string for a list of "server:port" separated by ","
|
||||
*/
|
||||
@VisibleForTesting
|
||||
public static String standardizeZKQuorumServerString(String quorumStringInput,
|
||||
String clientPort) {
|
||||
String[] serverHosts = quorumStringInput.split(",");
|
||||
return buildZKQuorumServerString(serverHosts, clientPort);
|
||||
}
|
||||
|
||||
// The Quorum for the ZK cluster can have one of the following formats (see examples below):
|
||||
// (1). s1,s2,s3 (no client port in the list, the client port could be obtained from clientPort)
|
||||
// (2). s1:p1,s2:p2,s3:p3 (with client port, which could be same or different for each server,
|
||||
// in this case, the clientPort would be ignored)
|
||||
// (3). s1:p1,s2,s3:p3 (mix of (1) and (2) - if port is not specified in a server, it would use
|
||||
// the clientPort; otherwise, it would use the specified port)
|
||||
@VisibleForTesting
|
||||
public static class ZKClusterKey {
|
||||
private String quorumString;
|
||||
private int clientPort;
|
||||
private String znodeParent;
|
||||
|
||||
ZKClusterKey(String quorumString, int clientPort, String znodeParent) {
|
||||
this.quorumString = quorumString;
|
||||
this.clientPort = clientPort;
|
||||
this.znodeParent = znodeParent;
|
||||
}
|
||||
|
||||
public String getQuorumString() {
|
||||
return quorumString;
|
||||
}
|
||||
|
||||
public int getClientPort() {
|
||||
return clientPort;
|
||||
}
|
||||
|
||||
public String getZnodeParent() {
|
||||
return znodeParent;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -592,8 +592,10 @@ possible configurations would overwhelm and obscure the important.
|
|||
</property>
|
||||
<property>
|
||||
<name>hbase.regions.slop</name>
|
||||
<value>0.2</value>
|
||||
<description>Rebalance if any regionserver has average + (average * slop) regions.</description>
|
||||
<value>0.001</value>
|
||||
<description>Rebalance if any regionserver has average + (average * slop) regions.
|
||||
The default value of this parameter is 0.001 in StochasticLoadBalancer (the default load balancer),
|
||||
while the default is 0.2 in other load balancers (e.g., SimpleLoadBalancer).</description>
|
||||
</property>
|
||||
<property>
|
||||
<name>hbase.server.thread.wakefrequency</name>
|
||||
|
|
|
@ -176,6 +176,12 @@ public class HBaseCommonTestingUtility {
|
|||
return new Path(PathName);
|
||||
}
|
||||
|
||||
public Path getRandomDir() {
|
||||
String randomStr = UUID.randomUUID().toString();
|
||||
Path testPath = new Path(getBaseTestDir(), randomStr);
|
||||
return testPath;
|
||||
}
|
||||
|
||||
/**
|
||||
* @param dir Directory to delete
|
||||
* @return True if we deleted it.
|
||||
|
|
|
@ -42,6 +42,8 @@ import java.util.jar.Manifest;
|
|||
import javax.tools.JavaCompiler;
|
||||
import javax.tools.ToolProvider;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.testclassification.SmallTests;
|
||||
import org.junit.AfterClass;
|
||||
import org.junit.BeforeClass;
|
||||
|
@ -49,10 +51,12 @@ import org.junit.Rule;
|
|||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
import org.junit.rules.TestName;
|
||||
import org.mortbay.log.Log;
|
||||
|
||||
@Category(SmallTests.class)
|
||||
public class TestClassFinder {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(TestClassFinder.class);
|
||||
|
||||
@Rule public TestName name = new TestName();
|
||||
private static final HBaseCommonTestingUtility testUtil = new HBaseCommonTestingUtility();
|
||||
private static final String BASEPKG = "tfcpkg";
|
||||
|
@ -78,7 +82,7 @@ public class TestClassFinder {
|
|||
deleteTestDir();
|
||||
}
|
||||
assertTrue(testDir.mkdirs());
|
||||
Log.info("Using new, clean directory=" + testDir);
|
||||
LOG.info("Using new, clean directory=" + testDir);
|
||||
}
|
||||
|
||||
@AfterClass
|
||||
|
@ -141,7 +145,7 @@ public class TestClassFinder {
|
|||
public void testClassFinderFiltersByNameInJar() throws Exception {
|
||||
final long counter = testCounter.incrementAndGet();
|
||||
final String classNamePrefix = name.getMethodName();
|
||||
Log.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
|
||||
LOG.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
|
||||
|
||||
ClassFinder.FileNameFilter notExcNameFilter = new ClassFinder.FileNameFilter() {
|
||||
@Override
|
||||
|
@ -161,7 +165,7 @@ public class TestClassFinder {
|
|||
public void testClassFinderFiltersByClassInJar() throws Exception {
|
||||
final long counter = testCounter.incrementAndGet();
|
||||
final String classNamePrefix = name.getMethodName();
|
||||
Log.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
|
||||
LOG.info("Created jar " + createAndLoadJar("", classNamePrefix, counter));
|
||||
|
||||
final ClassFinder.ClassFilter notExcClassFilter = new ClassFinder.ClassFilter() {
|
||||
@Override
|
||||
|
@ -223,7 +227,7 @@ public class TestClassFinder {
|
|||
final long counter = testCounter.incrementAndGet();
|
||||
final String classNamePrefix = name.getMethodName();
|
||||
String pkgNameSuffix = name.getMethodName();
|
||||
Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
|
||||
LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
|
||||
ClassFinder allClassesFinder = new ClassFinder();
|
||||
String pkgName = makePackageName(pkgNameSuffix, counter);
|
||||
Set<Class<?>> allClasses = allClassesFinder.findClasses(pkgName, false);
|
||||
|
@ -246,7 +250,7 @@ public class TestClassFinder {
|
|||
final long counter = testCounter.incrementAndGet();
|
||||
final String classNamePrefix = name.getMethodName();
|
||||
String pkgNameSuffix = name.getMethodName();
|
||||
Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
|
||||
LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
|
||||
final String classNameToFilterOut = classNamePrefix + counter;
|
||||
final ClassFinder.FileNameFilter notThisFilter = new ClassFinder.FileNameFilter() {
|
||||
@Override
|
||||
|
@ -271,7 +275,7 @@ public class TestClassFinder {
|
|||
final long counter = testCounter.incrementAndGet();
|
||||
final String classNamePrefix = name.getMethodName();
|
||||
String pkgNameSuffix = name.getMethodName();
|
||||
Log.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
|
||||
LOG.info("Created jar " + createAndLoadJar(pkgNameSuffix, classNamePrefix, counter));
|
||||
final Class<?> clazz = makeClass(pkgNameSuffix, classNamePrefix, counter);
|
||||
final ClassFinder.ClassFilter notThisFilter = new ClassFinder.ClassFilter() {
|
||||
@Override
|
||||
|
|
|
@ -20,6 +20,7 @@ package org.apache.hadoop.hbase;
|
|||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertNull;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
import static org.junit.Assert.fail;
|
||||
|
||||
import java.io.IOException;
|
||||
|
@ -27,10 +28,12 @@ import java.lang.reflect.InvocationTargetException;
|
|||
import java.lang.reflect.Method;
|
||||
import java.util.List;
|
||||
|
||||
import com.google.common.collect.ImmutableMap;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.testclassification.SmallTests;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
|
||||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
|
||||
|
@ -72,8 +75,11 @@ public class TestHBaseConfiguration {
|
|||
String prefix = "hbase.mapred.output.";
|
||||
conf.set("hbase.security.authentication", "kerberos");
|
||||
conf.set("hbase.regionserver.kerberos.principal", "hbasesource");
|
||||
conf.set(prefix + "hbase.regionserver.kerberos.principal", "hbasedest");
|
||||
conf.set(prefix, "shouldbemissing");
|
||||
HBaseConfiguration.setWithPrefix(conf, prefix,
|
||||
ImmutableMap.of(
|
||||
"hbase.regionserver.kerberos.principal", "hbasedest",
|
||||
"", "shouldbemissing")
|
||||
.entrySet());
|
||||
|
||||
Configuration subsetConf = HBaseConfiguration.subset(conf, prefix);
|
||||
assertNull(subsetConf.get(prefix + "hbase.regionserver.kerberos.principal"));
|
||||
|
|
|
@ -0,0 +1,126 @@
|
|||
/**
|
||||
* Licensed to the Apache Software Foundation (ASF) under one
|
||||
* or more contributor license agreements. See the NOTICE file
|
||||
* distributed with this work for additional information
|
||||
* regarding copyright ownership. The ASF licenses this file
|
||||
* to you under the Apache License, Version 2.0 (the
|
||||
* "License"); you may not use this file except in compliance
|
||||
* with the License. You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
package org.apache.hadoop.hbase.zookeeper;
|
||||
|
||||
import java.io.IOException;
|
||||
import java.util.Properties;
|
||||
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.testclassification.MiscTests;
|
||||
import org.apache.hadoop.hbase.testclassification.SmallTests;
|
||||
import org.junit.Assert;
|
||||
import org.junit.Test;
|
||||
import org.junit.experimental.categories.Category;
|
||||
|
||||
import static org.junit.Assert.assertEquals;
|
||||
import static org.junit.Assert.assertTrue;
|
||||
|
||||
@Category({MiscTests.class, SmallTests.class})
|
||||
public class TestZKConfig {
|
||||
|
||||
@Test
|
||||
public void testZKConfigLoading() throws Exception {
|
||||
Configuration conf = HBaseConfiguration.create();
|
||||
// Test that we read only from the config instance
|
||||
// (i.e. via hbase-default.xml and hbase-site.xml)
|
||||
conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
|
||||
Properties props = ZKConfig.makeZKProps(conf);
|
||||
assertEquals("Property client port should have been default from the HBase config",
|
||||
"2181",
|
||||
props.getProperty("clientPort"));
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testGetZooKeeperClusterKey() {
|
||||
Configuration conf = HBaseConfiguration.create();
|
||||
conf.set(HConstants.ZOOKEEPER_QUORUM, "\tlocalhost\n");
|
||||
conf.set(HConstants.ZOOKEEPER_CLIENT_PORT, "3333");
|
||||
conf.set(HConstants.ZOOKEEPER_ZNODE_PARENT, "hbase");
|
||||
String clusterKey = ZKConfig.getZooKeeperClusterKey(conf, "test");
|
||||
assertTrue(!clusterKey.contains("\t") && !clusterKey.contains("\n"));
|
||||
assertEquals("localhost:3333:hbase,test", clusterKey);
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterKey() throws Exception {
|
||||
testKey("server", 2181, "hbase");
|
||||
testKey("server1,server2,server3", 2181, "hbase");
|
||||
try {
|
||||
ZKConfig.validateClusterKey("2181:hbase");
|
||||
} catch (IOException ex) {
|
||||
// OK
|
||||
}
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testClusterKeyWithMultiplePorts() throws Exception {
|
||||
// server has different port than the default port
|
||||
testKey("server1:2182", 2181, "hbase", true);
|
||||
// multiple servers have their own port
|
||||
testKey("server1:2182,server2:2183,server3:2184", 2181, "hbase", true);
|
||||
// one server has no specified port, should use default port
|
||||
testKey("server1:2182,server2,server3:2184", 2181, "hbase", true);
|
||||
// the last server has no specified port, should use default port
|
||||
testKey("server1:2182,server2:2183,server3", 2181, "hbase", true);
|
||||
// multiple servers have no specified port, should use default port for those servers
|
||||
testKey("server1:2182,server2,server3:2184,server4", 2181, "hbase", true);
|
||||
// same server, different ports
|
||||
testKey("server1:2182,server1:2183,server1", 2181, "hbase", true);
|
||||
// mix of same server/different port and different server
|
||||
testKey("server1:2182,server2:2183,server1", 2181, "hbase", true);
|
||||
}
|
||||
|
||||
private void testKey(String ensemble, int port, String znode)
|
||||
throws IOException {
|
||||
testKey(ensemble, port, znode, false); // does not support multiple client ports
|
||||
}
|
||||
|
||||
private void testKey(String ensemble, int port, String znode, Boolean multiplePortSupport)
|
||||
throws IOException {
|
||||
Configuration conf = new Configuration();
|
||||
String key = ensemble+":"+port+":"+znode;
|
||||
String ensemble2 = null;
|
||||
ZKConfig.ZKClusterKey zkClusterKey = ZKConfig.transformClusterKey(key);
|
||||
if (multiplePortSupport) {
|
||||
ensemble2 = ZKConfig.standardizeZKQuorumServerString(ensemble,
|
||||
Integer.toString(port));
|
||||
assertEquals(ensemble2, zkClusterKey.getQuorumString());
|
||||
}
|
||||
else {
|
||||
assertEquals(ensemble, zkClusterKey.getQuorumString());
|
||||
}
|
||||
assertEquals(port, zkClusterKey.getClientPort());
|
||||
assertEquals(znode, zkClusterKey.getZnodeParent());
|
||||
|
||||
conf = HBaseConfiguration.createClusterConf(conf, key);
|
||||
assertEquals(zkClusterKey.getQuorumString(), conf.get(HConstants.ZOOKEEPER_QUORUM));
|
||||
assertEquals(zkClusterKey.getClientPort(), conf.getInt(HConstants.ZOOKEEPER_CLIENT_PORT, -1));
|
||||
assertEquals(zkClusterKey.getZnodeParent(), conf.get(HConstants.ZOOKEEPER_ZNODE_PARENT));
|
||||
|
||||
String reconstructedKey = ZKConfig.getZooKeeperClusterKey(conf);
|
||||
if (multiplePortSupport) {
|
||||
String key2 = ensemble2 + ":" + port + ":" + znode;
|
||||
assertEquals(key2, reconstructedKey);
|
||||
}
|
||||
else {
|
||||
assertEquals(key, reconstructedKey);
|
||||
}
|
||||
}
|
||||
}
|
|
@ -64,7 +64,9 @@ public class MemcachedBlockCache implements BlockCache {
|
|||
public static final String MEMCACHED_CONFIG_KEY = "hbase.cache.memcached.servers";
|
||||
public static final String MEMCACHED_TIMEOUT_KEY = "hbase.cache.memcached.timeout";
|
||||
public static final String MEMCACHED_OPTIMEOUT_KEY = "hbase.cache.memcached.optimeout";
|
||||
public static final String MEMCACHED_OPTIMIZE_KEY = "hbase.cache.memcached.spy.optimze";
|
||||
public static final long MEMCACHED_DEFAULT_TIMEOUT = 500;
|
||||
public static final boolean MEMCACHED_OPTIMIZE_DEFAULT = false;
|
||||
|
||||
private final MemcachedClient client;
|
||||
private final HFileBlockTranscoder tc = new HFileBlockTranscoder();
|
||||
|
@ -75,18 +77,16 @@ public class MemcachedBlockCache implements BlockCache {
|
|||
|
||||
long opTimeout = c.getLong(MEMCACHED_OPTIMEOUT_KEY, MEMCACHED_DEFAULT_TIMEOUT);
|
||||
long queueTimeout = c.getLong(MEMCACHED_TIMEOUT_KEY, opTimeout + MEMCACHED_DEFAULT_TIMEOUT);
|
||||
boolean optimize = c.getBoolean(MEMCACHED_OPTIMIZE_KEY, MEMCACHED_OPTIMIZE_DEFAULT);
|
||||
|
||||
ConnectionFactoryBuilder builder = new ConnectionFactoryBuilder()
|
||||
.setOpTimeout(opTimeout)
|
||||
.setOpQueueMaxBlockTime(queueTimeout) // Cap the max time before anything times out
|
||||
.setFailureMode(FailureMode.Redistribute)
|
||||
.setShouldOptimize(true) // When regions move lots of reads happen together
|
||||
// So combining them into single requests is nice.
|
||||
.setShouldOptimize(optimize)
|
||||
.setDaemon(true) // Don't keep threads around past the end of days.
|
||||
.setUseNagleAlgorithm(false) // Ain't nobody got time for that
|
||||
.setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // 4 times larger than the
|
||||
// default block just in case
|
||||
|
||||
.setReadBufferSize(HConstants.DEFAULT_BLOCKSIZE * 4 * 1024); // Much larger just in case
|
||||
|
||||
// Assume only the localhost is serving memcached.
|
||||
// A la mcrouter or co-locating memcached with split regionservers.
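A hedged configuration sketch for the keys above (the server address and values are placeholders, and the "host:port" address format is an assumption; how the Configuration is then handed to the cache is not shown here):

Configuration c = HBaseConfiguration.create();
c.set(MemcachedBlockCache.MEMCACHED_CONFIG_KEY, "127.0.0.1:11211");   // hbase.cache.memcached.servers
c.setLong(MemcachedBlockCache.MEMCACHED_OPTIMEOUT_KEY, 500L);         // per-operation timeout, ms
c.setBoolean(MemcachedBlockCache.MEMCACHED_OPTIMIZE_KEY, false);      // spymemcached "optimize" batching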
|
||||
|
|
|
@ -74,6 +74,9 @@ public interface MetricsHBaseServerSource extends BaseSource {
|
|||
String EXCEPTIONS_SANITY_NAME="exceptions.FailedSanityCheckException";
|
||||
String EXCEPTIONS_MOVED_NAME="exceptions.RegionMovedException";
|
||||
String EXCEPTIONS_NSRE_NAME="exceptions.NotServingRegionException";
|
||||
String EXCEPTIONS_MULTI_TOO_LARGE_NAME = "exceptions.multiResponseTooLarge";
|
||||
String EXCEPTIONS_MULTI_TOO_LARGE_DESC = "A response to a multi request was too large and the " +
|
||||
"rest of the requests will have to be retried.";
|
||||
|
||||
void authorizationSuccess();
|
||||
|
||||
|
@ -96,6 +99,7 @@ public interface MetricsHBaseServerSource extends BaseSource {
|
|||
void notServingRegionException();
|
||||
void unknownScannerException();
|
||||
void tooBusyException();
|
||||
void multiActionTooLargeException();
|
||||
|
||||
void sentBytes(long count);
|
||||
|
||||
|
@ -110,4 +114,6 @@ public interface MetricsHBaseServerSource extends BaseSource {
|
|||
void processedCall(int processingTime);
|
||||
|
||||
void queuedAndProcessedCall(int totalTime);
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
|
|
@ -22,9 +22,11 @@ public interface MetricsReplicationSinkSource {
|
|||
public static final String SINK_AGE_OF_LAST_APPLIED_OP = "sink.ageOfLastAppliedOp";
|
||||
public static final String SINK_APPLIED_BATCHES = "sink.appliedBatches";
|
||||
public static final String SINK_APPLIED_OPS = "sink.appliedOps";
|
||||
public static final String SINK_APPLIED_HFILES = "sink.appliedHFiles";
|
||||
|
||||
void setLastAppliedOpAge(long age);
|
||||
void incrAppliedBatches(long batches);
|
||||
void incrAppliedOps(long batchsize);
|
||||
long getLastAppliedOpAge();
|
||||
void incrAppliedHFiles(long hfileSize);
|
||||
}
|
||||
|
|
|
@ -32,6 +32,9 @@ public interface MetricsReplicationSourceSource {
|
|||
|
||||
public static final String SOURCE_LOG_EDITS_FILTERED = "source.logEditsFiltered";
|
||||
|
||||
public static final String SOURCE_SHIPPED_HFILES = "source.shippedHFiles";
|
||||
public static final String SOURCE_SIZE_OF_HFILE_REFS_QUEUE = "source.sizeOfHFileRefsQueue";
|
||||
|
||||
void setLastShippedAge(long age);
|
||||
void setSizeOfLogQueue(int size);
|
||||
void incrSizeOfLogQueue(int size);
|
||||
|
@ -44,4 +47,7 @@ public interface MetricsReplicationSourceSource {
|
|||
void incrLogReadInEdits(long size);
|
||||
void clear();
|
||||
long getLastShippedAge();
|
||||
void incrHFilesShipped(long hfiles);
|
||||
void incrSizeOfHFileRefsQueue(long size);
|
||||
void decrSizeOfHFileRefsQueue(long size);
|
||||
}
|
||||
|
|
|
@ -31,6 +31,7 @@ import org.apache.hadoop.metrics2.lib.MutableHistogram;
|
|||
public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
|
||||
implements MetricsHBaseServerSource {
|
||||
|
||||
|
||||
private final MetricsHBaseServerWrapper wrapper;
|
||||
private final MutableCounterLong authorizationSuccesses;
|
||||
private final MutableCounterLong authorizationFailures;
|
||||
|
@ -47,6 +48,7 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
|
|||
private final MutableCounterLong exceptionsSanity;
|
||||
private final MutableCounterLong exceptionsNSRE;
|
||||
private final MutableCounterLong exceptionsMoved;
|
||||
private final MutableCounterLong exceptionsMultiTooLarge;
|
||||
|
||||
|
||||
private MutableHistogram queueCallTime;
|
||||
|
@ -81,6 +83,8 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
|
|||
.newCounter(EXCEPTIONS_MOVED_NAME, EXCEPTIONS_TYPE_DESC, 0L);
|
||||
this.exceptionsNSRE = this.getMetricsRegistry()
|
||||
.newCounter(EXCEPTIONS_NSRE_NAME, EXCEPTIONS_TYPE_DESC, 0L);
|
||||
this.exceptionsMultiTooLarge = this.getMetricsRegistry()
|
||||
.newCounter(EXCEPTIONS_MULTI_TOO_LARGE_NAME, EXCEPTIONS_MULTI_TOO_LARGE_DESC, 0L);
|
||||
|
||||
this.authenticationSuccesses = this.getMetricsRegistry().newCounter(
|
||||
AUTHENTICATION_SUCCESSES_NAME, AUTHENTICATION_SUCCESSES_DESC, 0L);
|
||||
|
@ -159,6 +163,11 @@ public class MetricsHBaseServerSourceImpl extends BaseSourceImpl
|
|||
exceptionsBusy.incr();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void multiActionTooLargeException() {
|
||||
exceptionsMultiTooLarge.incr();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void authenticationSuccess() {
|
||||
authenticationSuccesses.incr();
|
||||
|
|
|
@ -32,6 +32,8 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS
|
|||
private final MutableCounterLong shippedOpsCounter;
|
||||
private final MutableCounterLong shippedKBsCounter;
|
||||
private final MutableCounterLong logReadInBytesCounter;
|
||||
private final MutableCounterLong shippedHFilesCounter;
|
||||
private final MutableGaugeLong sizeOfHFileRefsQueueGauge;
|
||||
|
||||
public MetricsReplicationGlobalSourceSource(MetricsReplicationSourceImpl rms) {
|
||||
this.rms = rms;
|
||||
|
@ -51,6 +53,11 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS
|
|||
logReadInEditsCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_LOG_READ_IN_EDITS, 0L);
|
||||
|
||||
logEditsFilteredCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_LOG_EDITS_FILTERED, 0L);
|
||||
|
||||
shippedHFilesCounter = rms.getMetricsRegistry().getLongCounter(SOURCE_SHIPPED_HFILES, 0L);
|
||||
|
||||
sizeOfHFileRefsQueueGauge =
|
||||
rms.getMetricsRegistry().getLongGauge(SOURCE_SIZE_OF_HFILE_REFS_QUEUE, 0L);
|
||||
}
|
||||
|
||||
@Override public void setLastShippedAge(long age) {
|
||||
|
@ -100,4 +107,18 @@ public class MetricsReplicationGlobalSourceSource implements MetricsReplicationS
|
|||
public long getLastShippedAge() {
|
||||
return ageOfLastShippedOpGauge.value();
|
||||
}
|
||||
|
||||
@Override public void incrHFilesShipped(long hfiles) {
|
||||
shippedHFilesCounter.incr(hfiles);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incrSizeOfHFileRefsQueue(long size) {
|
||||
sizeOfHFileRefsQueueGauge.incr(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void decrSizeOfHFileRefsQueue(long size) {
|
||||
sizeOfHFileRefsQueueGauge.decr(size);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -26,11 +26,13 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS
|
|||
private final MutableGaugeLong ageGauge;
|
||||
private final MutableCounterLong batchesCounter;
|
||||
private final MutableCounterLong opsCounter;
|
||||
private final MutableCounterLong hfilesCounter;
|
||||
|
||||
public MetricsReplicationSinkSourceImpl(MetricsReplicationSourceImpl rms) {
|
||||
ageGauge = rms.getMetricsRegistry().getLongGauge(SINK_AGE_OF_LAST_APPLIED_OP, 0L);
|
||||
batchesCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_BATCHES, 0L);
|
||||
opsCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_OPS, 0L);
|
||||
hfilesCounter = rms.getMetricsRegistry().getLongCounter(SINK_APPLIED_HFILES, 0L);
|
||||
}
|
||||
|
||||
@Override public void setLastAppliedOpAge(long age) {
|
||||
|
@ -49,4 +51,9 @@ public class MetricsReplicationSinkSourceImpl implements MetricsReplicationSinkS
|
|||
public long getLastAppliedOpAge() {
|
||||
return ageGauge.value();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incrAppliedHFiles(long hfiles) {
|
||||
hfilesCounter.incr(hfiles);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -32,6 +32,8 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou
|
|||
private final String shippedOpsKey;
|
||||
private final String shippedKBsKey;
|
||||
private final String logReadInBytesKey;
|
||||
private final String shippedHFilesKey;
|
||||
private final String sizeOfHFileRefsQueueKey;
|
||||
|
||||
private final MutableGaugeLong ageOfLastShippedOpGauge;
|
||||
private final MutableGaugeLong sizeOfLogQueueGauge;
|
||||
|
@ -41,6 +43,8 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou
|
|||
private final MutableCounterLong shippedOpsCounter;
|
||||
private final MutableCounterLong shippedKBsCounter;
|
||||
private final MutableCounterLong logReadInBytesCounter;
|
||||
private final MutableCounterLong shippedHFilesCounter;
|
||||
private final MutableGaugeLong sizeOfHFileRefsQueueGauge;
|
||||
|
||||
public MetricsReplicationSourceSourceImpl(MetricsReplicationSourceImpl rms, String id) {
|
||||
this.rms = rms;
|
||||
|
@ -69,6 +73,12 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou
|
|||
|
||||
logEditsFilteredKey = "source." + id + ".logEditsFiltered";
|
||||
logEditsFilteredCounter = rms.getMetricsRegistry().getLongCounter(logEditsFilteredKey, 0L);
|
||||
|
||||
shippedHFilesKey = "source." + this.id + ".shippedHFiles";
|
||||
shippedHFilesCounter = rms.getMetricsRegistry().getLongCounter(shippedHFilesKey, 0L);
|
||||
|
||||
sizeOfHFileRefsQueueKey = "source." + id + ".sizeOfHFileRefsQueue";
|
||||
sizeOfHFileRefsQueueGauge = rms.getMetricsRegistry().getLongGauge(sizeOfHFileRefsQueueKey, 0L);
|
||||
}
|
||||
|
||||
@Override public void setLastShippedAge(long age) {
|
||||
|
@ -124,10 +134,28 @@ public class MetricsReplicationSourceSourceImpl implements MetricsReplicationSou
|
|||
rms.removeMetric(logReadInEditsKey);
|
||||
|
||||
rms.removeMetric(logEditsFilteredKey);
|
||||
|
||||
rms.removeMetric(shippedHFilesKey);
|
||||
rms.removeMetric(sizeOfHFileRefsQueueKey);
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getLastShippedAge() {
|
||||
return ageOfLastShippedOpGauge.value();
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incrHFilesShipped(long hfiles) {
|
||||
shippedHFilesCounter.incr(hfiles);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incrSizeOfHFileRefsQueue(long size) {
|
||||
sizeOfHFileRefsQueueGauge.incr(size);
|
||||
}
|
||||
|
||||
@Override
|
||||
public void decrSizeOfHFileRefsQueue(long size) {
|
||||
sizeOfHFileRefsQueueGauge.decr(size);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -103,7 +103,7 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
|
|||
|
||||
protected static final int DEFAULT_NUM_REGIONS = 50; // number of regions in pre-split tables
|
||||
|
||||
private boolean keepTableAtTheEnd = false;
|
||||
private boolean keepObjectsAtTheEnd = false;
|
||||
protected HBaseCluster cluster;
|
||||
|
||||
protected Connection connection;
|
||||
|
@ -144,11 +144,19 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
|
|||
|
||||
@Override
|
||||
public void cleanUpCluster() throws Exception {
|
||||
if (!keepTableAtTheEnd) {
|
||||
if (!keepObjectsAtTheEnd) {
|
||||
Admin admin = util.getHBaseAdmin();
|
||||
admin.disableTables("ittable-\\d+");
|
||||
admin.deleteTables("ittable-\\d+");
|
||||
NamespaceDescriptor [] nsds = admin.listNamespaceDescriptors();
|
||||
for(NamespaceDescriptor nsd:nsds ) {
|
||||
if(nsd.getName().matches("itnamespace\\d+")) {
|
||||
LOG.info("Removing namespace="+nsd.getName());
|
||||
admin.deleteNamespace(nsd.getName());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enabledTables.clear();
|
||||
disabledTables.clear();
|
||||
deletedTables.clear();
|
||||
|
@ -938,9 +946,9 @@ public class IntegrationTestDDLMasterFailover extends IntegrationTestBase {
|
|||
LOG.info("Running hbck");
|
||||
hbck = HbckTestingUtil.doFsck(util.getConfiguration(), false);
|
||||
if (HbckTestingUtil.inconsistencyFound(hbck)) {
|
||||
// Find the inconsistency during HBCK. Leave table undropped so that
|
||||
// Found an inconsistency during HBCK. Leave the table and namespace undropped so that
|
||||
// we can check outside the test.
|
||||
keepTableAtTheEnd = true;
|
||||
keepObjectsAtTheEnd = true;
|
||||
}
|
||||
HbckTestingUtil.assertNoErrors(hbck);
|
||||
LOG.info("Finished hbck");
|
||||
|
|
|
@ -27,7 +27,6 @@ import java.util.TreeMap;
|
|||
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.procedure2.Procedure;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.ProcedureProtos;
|
||||
|
||||
/**
|
||||
|
@ -356,25 +355,19 @@ public class ProcedureStoreTracker {
|
|||
}
|
||||
}
|
||||
|
||||
public void insert(final Procedure proc, final Procedure[] subprocs) {
|
||||
insert(proc.getProcId());
|
||||
if (subprocs != null) {
|
||||
for (int i = 0; i < subprocs.length; ++i) {
|
||||
insert(subprocs[i].getProcId());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public void update(final Procedure proc) {
|
||||
update(proc.getProcId());
|
||||
}
|
||||
|
||||
public void insert(long procId) {
|
||||
BitSetNode node = getOrCreateNode(procId);
|
||||
node.update(procId);
|
||||
trackProcIds(procId);
|
||||
}
|
||||
|
||||
public void insert(final long procId, final long[] subProcIds) {
|
||||
update(procId);
|
||||
for (int i = 0; i < subProcIds.length; ++i) {
|
||||
insert(subProcIds[i]);
|
||||
}
|
||||
}
|
||||
|
||||
public void update(long procId) {
|
||||
Map.Entry<Long, BitSetNode> entry = map.floorEntry(procId);
|
||||
assert entry != null : "expected node to update procId=" + procId;
|
||||
|
|
|
@ -100,7 +100,6 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
|
||||
private final LinkedList<ProcedureWALFile> logs = new LinkedList<ProcedureWALFile>();
|
||||
private final ProcedureStoreTracker storeTracker = new ProcedureStoreTracker();
|
||||
private final AtomicLong inactiveLogsMaxId = new AtomicLong(0);
|
||||
private final ReentrantLock lock = new ReentrantLock();
|
||||
private final Condition waitCond = lock.newCondition();
|
||||
private final Condition slotCond = lock.newCondition();
|
||||
|
@ -191,19 +190,16 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
}
|
||||
|
||||
LOG.info("Stopping the WAL Procedure Store");
|
||||
if (lock.tryLock()) {
|
||||
try {
|
||||
waitCond.signalAll();
|
||||
syncCond.signalAll();
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
sendStopSignal();
|
||||
|
||||
if (!abort) {
|
||||
try {
|
||||
syncThread.join();
|
||||
while (syncThread.isAlive()) {
|
||||
sendStopSignal();
|
||||
syncThread.join(250);
|
||||
}
|
||||
} catch (InterruptedException e) {
|
||||
LOG.warn("join interrupted", e);
|
||||
Thread.currentThread().interrupt();
|
||||
}
|
||||
}
|
||||
|
@ -220,6 +216,17 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
logs.clear();
|
||||
}
|
||||
|
||||
private void sendStopSignal() {
|
||||
if (lock.tryLock()) {
|
||||
try {
|
||||
waitCond.signalAll();
|
||||
syncCond.signalAll();
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
public int getNumThreads() {
|
||||
return slots == null ? 0 : slots.length;
|
||||
|
@ -239,31 +246,36 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
|
||||
@Override
|
||||
public void recoverLease() throws IOException {
|
||||
LOG.info("Starting WAL Procedure Store lease recovery");
|
||||
FileStatus[] oldLogs = getLogFiles();
|
||||
while (isRunning()) {
|
||||
// Get Log-MaxID and recover lease on old logs
|
||||
flushLogId = initOldLogs(oldLogs);
|
||||
lock.lock();
|
||||
try {
|
||||
LOG.info("Starting WAL Procedure Store lease recovery");
|
||||
FileStatus[] oldLogs = getLogFiles();
|
||||
while (isRunning()) {
|
||||
// Get Log-MaxID and recover lease on old logs
|
||||
flushLogId = initOldLogs(oldLogs);
|
||||
|
||||
// Create new state-log
|
||||
if (!rollWriter(flushLogId + 1)) {
|
||||
// someone else has already created this log
|
||||
LOG.debug("someone else has already created log " + flushLogId);
|
||||
continue;
|
||||
}
|
||||
|
||||
// We have the lease on the log
|
||||
oldLogs = getLogFiles();
|
||||
if (getMaxLogId(oldLogs) > flushLogId) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId);
|
||||
// Create new state-log
|
||||
if (!rollWriter(flushLogId + 1)) {
|
||||
// someone else has already created this log
|
||||
LOG.debug("someone else has already created log " + flushLogId);
|
||||
continue;
|
||||
}
|
||||
logs.getLast().removeFile();
|
||||
continue;
|
||||
}
|
||||
|
||||
LOG.info("Lease acquired for flushLogId: " + flushLogId);
|
||||
break;
|
||||
// We have the lease on the log
|
||||
oldLogs = getLogFiles();
|
||||
if (getMaxLogId(oldLogs) > flushLogId) {
|
||||
if (LOG.isDebugEnabled()) {
|
||||
LOG.debug("Someone else created new logs. Expected maxLogId < " + flushLogId);
|
||||
}
|
||||
logs.getLast().removeFile();
|
||||
continue;
|
||||
}
|
||||
|
||||
LOG.info("Lease acquired for flushLogId: " + flushLogId);
|
||||
break;
|
||||
}
|
||||
} finally {
|
||||
lock.unlock();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -335,18 +347,22 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
}
|
||||
|
||||
ByteSlot slot = acquireSlot();
|
||||
long logId = -1;
|
||||
try {
|
||||
// Serialize the insert
|
||||
long[] subProcIds = null;
|
||||
if (subprocs != null) {
|
||||
ProcedureWALFormat.writeInsert(slot, proc, subprocs);
|
||||
subProcIds = new long[subprocs.length];
|
||||
for (int i = 0; i < subprocs.length; ++i) {
|
||||
subProcIds[i] = subprocs[i].getProcId();
|
||||
}
|
||||
} else {
|
||||
assert !proc.hasParent();
|
||||
ProcedureWALFormat.writeInsert(slot, proc);
|
||||
}
|
||||
|
||||
// Push the transaction data and wait until it is persisted
|
||||
pushData(slot);
|
||||
pushData(PushType.INSERT, slot, proc.getProcId(), subProcIds);
|
||||
} catch (IOException e) {
|
||||
// We are not able to serialize the procedure.
|
||||
// this is a code error, and we are not able to go on.
|
||||
|
@ -356,14 +372,6 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
} finally {
|
||||
releaseSlot(slot);
|
||||
}
|
||||
|
||||
// Update the store tracker
|
||||
synchronized (storeTracker) {
|
||||
storeTracker.insert(proc, subprocs);
|
||||
if (logId == flushLogId) {
|
||||
checkAndTryRoll();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -373,13 +381,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
}
|
||||
|
||||
ByteSlot slot = acquireSlot();
|
||||
long logId = -1;
|
||||
try {
|
||||
// Serialize the update
|
||||
ProcedureWALFormat.writeUpdate(slot, proc);
|
||||
|
||||
// Push the transaction data and wait until it is persisted
|
||||
logId = pushData(slot);
|
||||
pushData(PushType.UPDATE, slot, proc.getProcId(), null);
|
||||
} catch (IOException e) {
|
||||
// We are not able to serialize the procedure.
|
||||
// this is a code error, and we are not able to go on.
|
||||
|
@ -388,20 +395,6 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
} finally {
|
||||
releaseSlot(slot);
|
||||
}
|
||||
|
||||
// Update the store tracker
|
||||
boolean removeOldLogs = false;
|
||||
synchronized (storeTracker) {
|
||||
storeTracker.update(proc);
|
||||
if (logId == flushLogId) {
|
||||
removeOldLogs = storeTracker.isUpdated();
|
||||
checkAndTryRoll();
|
||||
}
|
||||
}
|
||||
|
||||
if (removeOldLogs) {
|
||||
setInactiveLogsMaxId(logId - 1);
|
||||
}
|
||||
}
|
||||
|
||||
@Override
|
||||
|
@ -411,13 +404,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
}
|
||||
|
||||
ByteSlot slot = acquireSlot();
|
||||
long logId = -1;
|
||||
try {
|
||||
// Serialize the delete
|
||||
ProcedureWALFormat.writeDelete(slot, procId);
|
||||
|
||||
// Push the transaction data and wait until it is persisted
|
||||
logId = pushData(slot);
|
||||
pushData(PushType.DELETE, slot, procId, null);
|
||||
} catch (IOException e) {
|
||||
// We are not able to serialize the procedure.
|
||||
// this is a code error, and we are not able to go on.
|
||||
|
@ -426,22 +418,6 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
} finally {
|
||||
releaseSlot(slot);
|
||||
}
|
||||
|
||||
boolean removeOldLogs = false;
|
||||
synchronized (storeTracker) {
|
||||
storeTracker.delete(procId);
|
||||
if (logId == flushLogId) {
|
||||
if (storeTracker.isEmpty() || storeTracker.isUpdated()) {
|
||||
removeOldLogs = checkAndTryRoll();
|
||||
} else {
|
||||
checkAndTryRoll();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (removeOldLogs) {
|
||||
setInactiveLogsMaxId(logId);
|
||||
}
|
||||
}
|
||||
|
||||
private ByteSlot acquireSlot() {
|
||||
|
@ -454,7 +430,10 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
slotsCache.offer(slot);
|
||||
}
|
||||
|
||||
private long pushData(final ByteSlot slot) {
|
||||
private enum PushType { INSERT, UPDATE, DELETE };
|
||||
|
||||
private long pushData(final PushType type, final ByteSlot slot,
|
||||
final long procId, final long[] subProcIds) {
|
||||
if (!isRunning()) {
|
||||
throw new RuntimeException("the store must be running before inserting data");
|
||||
}
|
||||
|
@ -481,6 +460,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
}
|
||||
}
|
||||
|
||||
updateStoreTracker(type, procId, subProcIds);
|
||||
slots[slotIndex++] = slot;
|
||||
logId = flushLogId;
|
||||
|
||||
|
@ -509,20 +489,29 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
return logId;
|
||||
}
|
||||
|
||||
private boolean isSyncAborted() {
|
||||
return syncException.get() != null;
|
||||
private void updateStoreTracker(final PushType type,
|
||||
final long procId, final long[] subProcIds) {
|
||||
switch (type) {
|
||||
case INSERT:
|
||||
if (subProcIds == null) {
|
||||
storeTracker.insert(procId);
|
||||
} else {
|
||||
storeTracker.insert(procId, subProcIds);
|
||||
}
|
||||
break;
|
||||
case UPDATE:
|
||||
storeTracker.update(procId);
|
||||
break;
|
||||
case DELETE:
|
||||
storeTracker.delete(procId);
|
||||
break;
|
||||
default:
|
||||
throw new RuntimeException("invalid push type " + type);
|
||||
}
|
||||
}
|
||||
|
||||
protected void periodicRoll() throws IOException {
|
||||
long logId;
|
||||
boolean removeOldLogs;
|
||||
synchronized (storeTracker) {
|
||||
logId = flushLogId;
|
||||
removeOldLogs = storeTracker.isEmpty();
|
||||
}
|
||||
if (checkAndTryRoll() && removeOldLogs) {
|
||||
setInactiveLogsMaxId(logId);
|
||||
}
|
||||
private boolean isSyncAborted() {
|
||||
return syncException.get() != null;
|
||||
}
|
||||
|
||||
private void syncLoop() throws Throwable {
|
||||
|
@ -534,7 +523,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
// Wait until new data is available
|
||||
if (slotIndex == 0) {
|
||||
if (!loading.get()) {
|
||||
removeInactiveLogs();
|
||||
periodicRoll();
|
||||
}
|
||||
|
||||
if (LOG.isTraceEnabled()) {
|
||||
|
@ -547,7 +536,6 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
waitCond.await(getMillisToNextPeriodicRoll(), TimeUnit.MILLISECONDS);
|
||||
if (slotIndex == 0) {
|
||||
// no data.. probably a stop() or a periodic roll
|
||||
periodicRoll();
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
@ -560,13 +548,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
long syncWaitMs = System.currentTimeMillis() - syncWaitSt;
if (LOG.isTraceEnabled() && (syncWaitMs > 10 || slotIndex < slots.length)) {
float rollSec = getMillisFromLastRoll() / 1000.0f;
LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s/sec",
LOG.trace(String.format("Sync wait %s, slotIndex=%s , totalSynced=%s (%s/sec)",
StringUtils.humanTimeDiff(syncWaitMs), slotIndex,
StringUtils.humanSize(totalSynced.get()),
StringUtils.humanSize(totalSynced.get() / rollSec)));
}

inSync.set(true);
totalSynced.addAndGet(syncSlots());
slotIndex = 0;
@@ -639,8 +626,7 @@ public class WALProcedureStore extends ProcedureStoreBase {
return totalSynced;
}

@VisibleForTesting
public boolean rollWriterOrDie() {
private boolean rollWriterOrDie() {
for (int i = 1; i <= rollRetries; ++i) {
try {
if (rollWriter()) {
@@ -656,17 +642,13 @@ public class WALProcedureStore extends ProcedureStoreBase {
throw new RuntimeException("unable to roll the log");
|
||||
}
|
||||
|
||||
protected boolean checkAndTryRoll() {
|
||||
if (!isRunning()) return false;
|
||||
|
||||
if (totalSynced.get() > rollThreshold || getMillisToNextPeriodicRoll() <= 0) {
|
||||
try {
|
||||
return rollWriter();
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to roll the log", e);
|
||||
}
|
||||
private boolean tryRollWriter() {
|
||||
try {
|
||||
return rollWriter();
|
||||
} catch (IOException e) {
|
||||
LOG.warn("Unable to roll the log", e);
|
||||
return false;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
private long getMillisToNextPeriodicRoll() {
|
||||
|
@ -680,7 +662,52 @@ public class WALProcedureStore extends ProcedureStoreBase {
|
|||
return (System.currentTimeMillis() - lastRollTs.get());
}

protected boolean rollWriter() throws IOException {
@VisibleForTesting
protected void periodicRollForTesting() throws IOException {
lock.lock();
try {
periodicRoll();
} finally {
lock.unlock();
}
}

@VisibleForTesting
protected boolean rollWriterForTesting() throws IOException {
lock.lock();
try {
return rollWriter();
} finally {
lock.unlock();
}
}

private void periodicRoll() throws IOException {
if (storeTracker.isEmpty()) {
if (LOG.isTraceEnabled()) {
LOG.trace("no active procedures");
}
tryRollWriter();
removeAllLogs(flushLogId - 1);
} else {
if (storeTracker.isUpdated()) {
if (LOG.isTraceEnabled()) {
LOG.trace("all the active procedures are in the latest log");
}
removeAllLogs(flushLogId - 1);
}

// if the log size has exceeded the roll threshold
// or the periodic roll timeout is expired, try to roll the wal.
if (totalSynced.get() > rollThreshold || getMillisToNextPeriodicRoll() <= 0) {
tryRollWriter();
}

removeInactiveLogs();
}
}

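The reworked periodicRoll() above rolls the log when either the bytes synced since the last roll exceed rollThreshold or the periodic roll interval has elapsed. A self-contained sketch of that trigger check follows; the threshold constants are hypothetical placeholder values, while the real store takes them from its configuration.

import java.util.concurrent.atomic.AtomicLong;

public class RollTriggerDemo {
  // Hypothetical values; the real store reads these from its configuration.
  private static final long ROLL_THRESHOLD_BYTES = 32L * 1024 * 1024;
  private static final long PERIODIC_ROLL_MS = 60 * 60 * 1000;

  private final AtomicLong totalSynced = new AtomicLong(0);
  private final AtomicLong lastRollTs = new AtomicLong(System.currentTimeMillis());

  private long getMillisToNextPeriodicRoll() {
    long elapsed = System.currentTimeMillis() - lastRollTs.get();
    return PERIODIC_ROLL_MS - elapsed;
  }

  public boolean shouldRoll() {
    // Roll when enough data was synced since the last roll, or the timer expired.
    return totalSynced.get() > ROLL_THRESHOLD_BYTES || getMillisToNextPeriodicRoll() <= 0;
  }

  public static void main(String[] args) {
    RollTriggerDemo demo = new RollTriggerDemo();
    demo.totalSynced.addAndGet(64L * 1024 * 1024);   // simulate 64 MB synced
    System.out.println("should roll: " + demo.shouldRoll());   // true
  }
}
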
private boolean rollWriter() throws IOException {
// Create new state-log
if (!rollWriter(flushLogId + 1)) {
LOG.warn("someone else has already created log " + flushLogId);
@@ -701,6 +728,7 @@ public class WALProcedureStore extends ProcedureStoreBase {

private boolean rollWriter(final long logId) throws IOException {
assert logId > flushLogId : "logId=" + logId + " flushLogId=" + flushLogId;
assert lock.isHeldByCurrentThread() : "expected to be the lock owner. " + lock.isLocked();

ProcedureWALHeader header = ProcedureWALHeader.newBuilder()
.setVersion(ProcedureWALFormat.HEADER_VERSION)
@@ -730,20 +758,16 @@ public class WALProcedureStore extends ProcedureStoreBase {
newStream.close();
return false;
}
lock.lock();
try {
closeStream();
synchronized (storeTracker) {
storeTracker.resetUpdates();
}
stream = newStream;
flushLogId = logId;
totalSynced.set(0);
lastRollTs.set(System.currentTimeMillis());
logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos));
} finally {
lock.unlock();
}

closeStream();

storeTracker.resetUpdates();
stream = newStream;
flushLogId = logId;
totalSynced.set(0);
lastRollTs.set(System.currentTimeMillis());
logs.add(new ProcedureWALFile(fs, newLogFile, header, startPos));

if (LOG.isDebugEnabled()) {
LOG.debug("Roll new state log: " + logId);
}
@@ -754,11 +778,9 @@ public class WALProcedureStore extends ProcedureStoreBase {
try {
if (stream != null) {
try {
synchronized (storeTracker) {
ProcedureWALFile log = logs.getLast();
log.setProcIds(storeTracker.getUpdatedMinProcId(), storeTracker.getUpdatedMaxProcId());
ProcedureWALFormat.writeTrailer(stream, storeTracker);
}
ProcedureWALFile log = logs.getLast();
log.setProcIds(storeTracker.getUpdatedMinProcId(), storeTracker.getUpdatedMaxProcId());
ProcedureWALFormat.writeTrailer(stream, storeTracker);
} catch (IOException e) {
LOG.warn("Unable to write the trailer: " + e.getMessage());
}
@@ -774,30 +796,12 @@ public class WALProcedureStore extends ProcedureStoreBase {
// ==========================================================================
// Log Files cleaner helpers
// ==========================================================================
private void setInactiveLogsMaxId(long logId) {
long expect = 0;
while (!inactiveLogsMaxId.compareAndSet(expect, logId)) {
expect = inactiveLogsMaxId.get();
if (expect >= logId) {
break;
}
}
}

private void removeInactiveLogs() {
long lastLogId = inactiveLogsMaxId.get();
if (lastLogId != 0) {
removeAllLogs(lastLogId);
inactiveLogsMaxId.compareAndSet(lastLogId, 0);
}

// Verify if the ProcId of the first oldest is still active. if not remove the file.
while (logs.size() > 1) {
ProcedureWALFile log = logs.getFirst();
synchronized (storeTracker) {
if (storeTracker.isTracking(log.getMinProcId(), log.getMaxProcId())) {
break;
}
if (storeTracker.isTracking(log.getMinProcId(), log.getMaxProcId())) {
break;
}
removeLogFile(log);
}
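setInactiveLogsMaxId() above keeps inactiveLogsMaxId at the largest logId seen so far without taking a lock: it retries compareAndSet until either it wins or another thread has already recorded an equal or newer id. The same pattern in isolation, as a runnable sketch:

import java.util.concurrent.atomic.AtomicLong;

public class MonotonicMaxDemo {
  private final AtomicLong inactiveLogsMaxId = new AtomicLong(0);

  // Store logId only if it is larger than the current value (lock-free).
  public void setInactiveLogsMaxId(long logId) {
    long expect = 0;
    while (!inactiveLogsMaxId.compareAndSet(expect, logId)) {
      expect = inactiveLogsMaxId.get();
      if (expect >= logId) {
        break;   // someone already recorded an equal or newer log id
      }
    }
  }

  public static void main(String[] args) {
    MonotonicMaxDemo demo = new MonotonicMaxDemo();
    demo.setInactiveLogsMaxId(5);
    demo.setInactiveLogsMaxId(3);   // ignored: 5 is already recorded
    System.out.println(demo.inactiveLogsMaxId.get());   // 5
  }
}
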
@@ -312,7 +312,7 @@ public class TestProcedureRecovery {
public void testRunningProcWithSameNonce() throws Exception {
final long nonceGroup = 456;
final long nonce = 33333;
Procedure proc = new TestMultiStepProcedure();
Procedure proc = new TestSingleStepProcedure();
long procId = ProcedureTestingUtility.submitAndWait(procExecutor, proc, nonceGroup, nonce);

// Restart (use a latch to prevent the step execution until we submitted proc2)
@@ -320,7 +320,7 @@ public class TestProcedureRecovery {
procEnv.setWaitLatch(latch);
restart();
// Submit a procedure with the same nonce and expect the same procedure would return.
Procedure proc2 = new TestMultiStepProcedure();
Procedure proc2 = new TestSingleStepProcedure();
long procId2 = procExecutor.submitProcedure(proc2, nonceGroup, nonce);
latch.countDown();
procEnv.setWaitLatch(null);
@ -25,7 +25,6 @@ import java.util.concurrent.atomic.AtomicLong;
|
|||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.hbase.procedure2.Procedure;
|
||||
import org.apache.hadoop.hbase.testclassification.SmallTests;
|
||||
|
||||
import org.junit.Assert;
|
||||
|
@ -41,27 +40,6 @@ import static org.junit.Assert.fail;
|
|||
public class TestProcedureStoreTracker {
|
||||
private static final Log LOG = LogFactory.getLog(TestProcedureStoreTracker.class);
|
||||
|
||||
static class TestProcedure extends Procedure<Void> {
|
||||
public TestProcedure(long procId) {
|
||||
setProcId(procId);
|
||||
}
|
||||
|
||||
@Override
|
||||
protected Procedure[] execute(Void env) { return null; }
|
||||
|
||||
@Override
|
||||
protected void rollback(Void env) { /* no-op */ }
|
||||
|
||||
@Override
|
||||
protected boolean abort(Void env) { return false; }
|
||||
|
||||
@Override
|
||||
protected void serializeStateData(final OutputStream stream) { /* no-op */ }
|
||||
|
||||
@Override
|
||||
protected void deserializeStateData(final InputStream stream) { /* no-op */ }
|
||||
}
|
||||
|
||||
@Test
|
||||
public void testSeqInsertAndDelete() {
|
||||
ProcedureStoreTracker tracker = new ProcedureStoreTracker();
|
||||
|
@ -161,13 +139,10 @@ public class TestProcedureStoreTracker {
|
|||
ProcedureStoreTracker tracker = new ProcedureStoreTracker();
|
||||
assertTrue(tracker.isEmpty());
|
||||
|
||||
Procedure[] procs = new TestProcedure[] {
|
||||
new TestProcedure(1), new TestProcedure(2), new TestProcedure(3),
|
||||
new TestProcedure(4), new TestProcedure(5), new TestProcedure(6),
|
||||
};
|
||||
long[] procs = new long[] { 1, 2, 3, 4, 5, 6 };
|
||||
|
||||
tracker.insert(procs[0], null);
|
||||
tracker.insert(procs[1], new Procedure[] { procs[2], procs[3], procs[4] });
|
||||
tracker.insert(procs[0]);
|
||||
tracker.insert(procs[1], new long[] { procs[2], procs[3], procs[4] });
|
||||
assertFalse(tracker.isEmpty());
|
||||
assertTrue(tracker.isUpdated());
|
||||
|
||||
|
@ -189,11 +164,11 @@ public class TestProcedureStoreTracker {
|
|||
assertTrue(tracker.isUpdated());
|
||||
|
||||
for (int i = 0; i < 5; ++i) {
|
||||
tracker.delete(procs[i].getProcId());
|
||||
tracker.delete(procs[i]);
|
||||
assertFalse(tracker.isEmpty());
|
||||
assertTrue(tracker.isUpdated());
|
||||
}
|
||||
tracker.delete(procs[5].getProcId());
|
||||
tracker.delete(procs[5]);
|
||||
assertTrue(tracker.isEmpty());
|
||||
}
|
||||
|
||||
|
|
|
@ -103,7 +103,7 @@ public class TestWALProcedureStore {
|
|||
@Test
|
||||
public void testEmptyRoll() throws Exception {
|
||||
for (int i = 0; i < 10; ++i) {
|
||||
procStore.periodicRoll();
|
||||
procStore.periodicRollForTesting();
|
||||
}
|
||||
FileStatus[] status = fs.listStatus(logDir);
|
||||
assertEquals(1, status.length);
|
||||
|
@ -215,14 +215,14 @@ public class TestWALProcedureStore {
|
|||
procStore.update(rootProcs[i-1]);
|
||||
}
|
||||
// insert root-child txn
|
||||
procStore.rollWriter();
|
||||
procStore.rollWriterForTesting();
|
||||
for (int i = 1; i <= rootProcs.length; i++) {
|
||||
TestProcedure b = new TestProcedure(rootProcs.length + i, i);
|
||||
rootProcs[i-1].addStackId(1);
|
||||
procStore.insert(rootProcs[i-1], new Procedure[] { b });
|
||||
}
|
||||
// insert child updates
|
||||
procStore.rollWriter();
|
||||
procStore.rollWriterForTesting();
|
||||
for (int i = 1; i <= rootProcs.length; i++) {
|
||||
procStore.update(new TestProcedure(rootProcs.length + i, i));
|
||||
}
|
||||
|
@ -230,9 +230,10 @@ public class TestWALProcedureStore {
|
|||
// Stop the store
|
||||
procStore.stop(false);
|
||||
|
||||
// Remove 4 byte from the trailer
|
||||
// the first log was removed,
|
||||
// we have insert-txn and updates in the others so everything is fine
|
||||
FileStatus[] logs = fs.listStatus(logDir);
|
||||
assertEquals(3, logs.length);
|
||||
assertEquals(Arrays.toString(logs), 2, logs.length);
|
||||
Arrays.sort(logs, new Comparator<FileStatus>() {
|
||||
@Override
|
||||
public int compare(FileStatus o1, FileStatus o2) {
|
||||
|
@ -240,15 +241,13 @@ public class TestWALProcedureStore {
|
|||
}
|
||||
});
|
||||
|
||||
// Remove the first log, we have insert-txn and updates in the others so everything is fine.
|
||||
fs.delete(logs[0].getPath(), false);
|
||||
LoadCounter loader = new LoadCounter();
|
||||
storeRestart(loader);
|
||||
assertEquals(rootProcs.length * 2, loader.getLoadedCount());
|
||||
assertEquals(0, loader.getCorruptedCount());
|
||||
|
||||
// Remove the second log, we have lost any root/parent references
|
||||
fs.delete(logs[1].getPath(), false);
|
||||
// Remove the second log, we have lost all the root/parent references
|
||||
fs.delete(logs[0].getPath(), false);
|
||||
loader.reset();
|
||||
storeRestart(loader);
|
||||
assertEquals(0, loader.getLoadedCount());
|
||||
|
@ -277,7 +276,7 @@ public class TestWALProcedureStore {
|
|||
b.addStackId(1);
|
||||
procStore.update(b);
|
||||
|
||||
procStore.rollWriter();
|
||||
procStore.rollWriterForTesting();
|
||||
|
||||
a.addStackId(2);
|
||||
procStore.update(a);
|
||||
|
@ -326,7 +325,7 @@ public class TestWALProcedureStore {
|
|||
b.addStackId(2);
|
||||
procStore.update(b);
|
||||
|
||||
procStore.rollWriter();
|
||||
procStore.rollWriterForTesting();
|
||||
|
||||
b.addStackId(3);
|
||||
procStore.update(b);
|
||||
|
@@ -427,6 +426,36 @@ public class TestWALProcedureStore {
assertEquals(1, procStore.getActiveLogs().size());
}

@Test
public void testRollAndRemove() throws IOException {
// Insert something in the log
Procedure proc1 = new TestSequentialProcedure();
procStore.insert(proc1, null);

Procedure proc2 = new TestSequentialProcedure();
procStore.insert(proc2, null);

// roll the log, now we have 2
procStore.rollWriterForTesting();
assertEquals(2, procStore.getActiveLogs().size());

// everything will be up to date in the second log
// so we can remove the first one
procStore.update(proc1);
procStore.update(proc2);
assertEquals(1, procStore.getActiveLogs().size());

// roll the log, now we have 2
procStore.rollWriterForTesting();
assertEquals(2, procStore.getActiveLogs().size());

// remove everything active
// so we can remove all the logs
procStore.delete(proc1.getProcId());
procStore.delete(proc2.getProcId());
assertEquals(1, procStore.getActiveLogs().size());
}

private void corruptLog(final FileStatus logFile, final long dropBytes)
throws IOException {
assertTrue(logFile.getLen() > dropBytes);
@ -16896,6 +16896,51 @@ public final class AdminProtos {
|
|||
*/
|
||||
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntryOrBuilder getEntryOrBuilder(
|
||||
int index);
|
||||
|
||||
// optional string replicationClusterId = 2;
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
boolean hasReplicationClusterId();
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
java.lang.String getReplicationClusterId();
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
com.google.protobuf.ByteString
|
||||
getReplicationClusterIdBytes();
|
||||
|
||||
// optional string sourceBaseNamespaceDirPath = 3;
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
boolean hasSourceBaseNamespaceDirPath();
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
java.lang.String getSourceBaseNamespaceDirPath();
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
com.google.protobuf.ByteString
|
||||
getSourceBaseNamespaceDirPathBytes();
|
||||
|
||||
// optional string sourceHFileArchiveDirPath = 4;
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
boolean hasSourceHFileArchiveDirPath();
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
java.lang.String getSourceHFileArchiveDirPath();
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
com.google.protobuf.ByteString
|
||||
getSourceHFileArchiveDirPathBytes();
|
||||
}
|
||||
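
The three optional fields added to ReplicateWALEntryRequest carry the source cluster id and the directory paths a replication sink needs in order to locate bulk-loaded HFiles. A hedged sketch of building such a request with the generated API from this patch; the cluster id and paths are placeholder values, and entries would be added the same way as before this change.

import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest;

public class ReplicateRequestExample {
  public static void main(String[] args) {
    // Placeholder values for illustration only.
    ReplicateWALEntryRequest request = ReplicateWALEntryRequest.newBuilder()
        .setReplicationClusterId("peer-cluster-1")
        .setSourceBaseNamespaceDirPath("/hbase/data")
        .setSourceHFileArchiveDirPath("/hbase/archive/data")
        .build();

    System.out.println(request.hasReplicationClusterId());        // true
    System.out.println(request.getSourceBaseNamespaceDirPath());  // /hbase/data
  }
}
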
/**
|
||||
* Protobuf type {@code hbase.pb.ReplicateWALEntryRequest}
|
||||
|
@ -16963,6 +17008,21 @@ public final class AdminProtos {
|
|||
entry_.add(input.readMessage(org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry.PARSER, extensionRegistry));
|
||||
break;
|
||||
}
|
||||
case 18: {
|
||||
bitField0_ |= 0x00000001;
|
||||
replicationClusterId_ = input.readBytes();
|
||||
break;
|
||||
}
|
||||
case 26: {
|
||||
bitField0_ |= 0x00000002;
|
||||
sourceBaseNamespaceDirPath_ = input.readBytes();
|
||||
break;
|
||||
}
|
||||
case 34: {
|
||||
bitField0_ |= 0x00000004;
|
||||
sourceHFileArchiveDirPath_ = input.readBytes();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
|
||||
|
@ -17005,6 +17065,7 @@ public final class AdminProtos {
|
|||
return PARSER;
|
||||
}
|
||||
|
||||
private int bitField0_;
|
||||
// repeated .hbase.pb.WALEntry entry = 1;
|
||||
public static final int ENTRY_FIELD_NUMBER = 1;
|
||||
private java.util.List<org.apache.hadoop.hbase.protobuf.generated.AdminProtos.WALEntry> entry_;
|
||||
|
@ -17041,8 +17102,140 @@ public final class AdminProtos {
|
|||
return entry_.get(index);
|
||||
}
|
||||
|
||||
// optional string replicationClusterId = 2;
|
||||
public static final int REPLICATIONCLUSTERID_FIELD_NUMBER = 2;
|
||||
private java.lang.Object replicationClusterId_;
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public boolean hasReplicationClusterId() {
|
||||
return ((bitField0_ & 0x00000001) == 0x00000001);
|
||||
}
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public java.lang.String getReplicationClusterId() {
|
||||
java.lang.Object ref = replicationClusterId_;
|
||||
if (ref instanceof java.lang.String) {
|
||||
return (java.lang.String) ref;
|
||||
} else {
|
||||
com.google.protobuf.ByteString bs =
|
||||
(com.google.protobuf.ByteString) ref;
|
||||
java.lang.String s = bs.toStringUtf8();
|
||||
if (bs.isValidUtf8()) {
|
||||
replicationClusterId_ = s;
|
||||
}
|
||||
return s;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public com.google.protobuf.ByteString
|
||||
getReplicationClusterIdBytes() {
|
||||
java.lang.Object ref = replicationClusterId_;
|
||||
if (ref instanceof java.lang.String) {
|
||||
com.google.protobuf.ByteString b =
|
||||
com.google.protobuf.ByteString.copyFromUtf8(
|
||||
(java.lang.String) ref);
|
||||
replicationClusterId_ = b;
|
||||
return b;
|
||||
} else {
|
||||
return (com.google.protobuf.ByteString) ref;
|
||||
}
|
||||
}
|
||||
|
||||
// optional string sourceBaseNamespaceDirPath = 3;
|
||||
public static final int SOURCEBASENAMESPACEDIRPATH_FIELD_NUMBER = 3;
|
||||
private java.lang.Object sourceBaseNamespaceDirPath_;
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public boolean hasSourceBaseNamespaceDirPath() {
|
||||
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public java.lang.String getSourceBaseNamespaceDirPath() {
|
||||
java.lang.Object ref = sourceBaseNamespaceDirPath_;
|
||||
if (ref instanceof java.lang.String) {
|
||||
return (java.lang.String) ref;
|
||||
} else {
|
||||
com.google.protobuf.ByteString bs =
|
||||
(com.google.protobuf.ByteString) ref;
|
||||
java.lang.String s = bs.toStringUtf8();
|
||||
if (bs.isValidUtf8()) {
|
||||
sourceBaseNamespaceDirPath_ = s;
|
||||
}
|
||||
return s;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public com.google.protobuf.ByteString
|
||||
getSourceBaseNamespaceDirPathBytes() {
|
||||
java.lang.Object ref = sourceBaseNamespaceDirPath_;
|
||||
if (ref instanceof java.lang.String) {
|
||||
com.google.protobuf.ByteString b =
|
||||
com.google.protobuf.ByteString.copyFromUtf8(
|
||||
(java.lang.String) ref);
|
||||
sourceBaseNamespaceDirPath_ = b;
|
||||
return b;
|
||||
} else {
|
||||
return (com.google.protobuf.ByteString) ref;
|
||||
}
|
||||
}
|
||||
|
||||
// optional string sourceHFileArchiveDirPath = 4;
|
||||
public static final int SOURCEHFILEARCHIVEDIRPATH_FIELD_NUMBER = 4;
|
||||
private java.lang.Object sourceHFileArchiveDirPath_;
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public boolean hasSourceHFileArchiveDirPath() {
|
||||
return ((bitField0_ & 0x00000004) == 0x00000004);
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public java.lang.String getSourceHFileArchiveDirPath() {
|
||||
java.lang.Object ref = sourceHFileArchiveDirPath_;
|
||||
if (ref instanceof java.lang.String) {
|
||||
return (java.lang.String) ref;
|
||||
} else {
|
||||
com.google.protobuf.ByteString bs =
|
||||
(com.google.protobuf.ByteString) ref;
|
||||
java.lang.String s = bs.toStringUtf8();
|
||||
if (bs.isValidUtf8()) {
|
||||
sourceHFileArchiveDirPath_ = s;
|
||||
}
|
||||
return s;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public com.google.protobuf.ByteString
|
||||
getSourceHFileArchiveDirPathBytes() {
|
||||
java.lang.Object ref = sourceHFileArchiveDirPath_;
|
||||
if (ref instanceof java.lang.String) {
|
||||
com.google.protobuf.ByteString b =
|
||||
com.google.protobuf.ByteString.copyFromUtf8(
|
||||
(java.lang.String) ref);
|
||||
sourceHFileArchiveDirPath_ = b;
|
||||
return b;
|
||||
} else {
|
||||
return (com.google.protobuf.ByteString) ref;
|
||||
}
|
||||
}
|
||||
|
||||
private void initFields() {
|
||||
entry_ = java.util.Collections.emptyList();
|
||||
replicationClusterId_ = "";
|
||||
sourceBaseNamespaceDirPath_ = "";
|
||||
sourceHFileArchiveDirPath_ = "";
|
||||
}
|
||||
private byte memoizedIsInitialized = -1;
|
||||
public final boolean isInitialized() {
|
||||
|
@ -17065,6 +17258,15 @@ public final class AdminProtos {
|
|||
for (int i = 0; i < entry_.size(); i++) {
|
||||
output.writeMessage(1, entry_.get(i));
|
||||
}
|
||||
if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
||||
output.writeBytes(2, getReplicationClusterIdBytes());
|
||||
}
|
||||
if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
||||
output.writeBytes(3, getSourceBaseNamespaceDirPathBytes());
|
||||
}
|
||||
if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
||||
output.writeBytes(4, getSourceHFileArchiveDirPathBytes());
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
|
@ -17078,6 +17280,18 @@ public final class AdminProtos {
|
|||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeMessageSize(1, entry_.get(i));
|
||||
}
|
||||
if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(2, getReplicationClusterIdBytes());
|
||||
}
|
||||
if (((bitField0_ & 0x00000002) == 0x00000002)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(3, getSourceBaseNamespaceDirPathBytes());
|
||||
}
|
||||
if (((bitField0_ & 0x00000004) == 0x00000004)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(4, getSourceHFileArchiveDirPathBytes());
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
|
@ -17103,6 +17317,21 @@ public final class AdminProtos {
|
|||
boolean result = true;
|
||||
result = result && getEntryList()
|
||||
.equals(other.getEntryList());
|
||||
result = result && (hasReplicationClusterId() == other.hasReplicationClusterId());
|
||||
if (hasReplicationClusterId()) {
|
||||
result = result && getReplicationClusterId()
|
||||
.equals(other.getReplicationClusterId());
|
||||
}
|
||||
result = result && (hasSourceBaseNamespaceDirPath() == other.hasSourceBaseNamespaceDirPath());
|
||||
if (hasSourceBaseNamespaceDirPath()) {
|
||||
result = result && getSourceBaseNamespaceDirPath()
|
||||
.equals(other.getSourceBaseNamespaceDirPath());
|
||||
}
|
||||
result = result && (hasSourceHFileArchiveDirPath() == other.hasSourceHFileArchiveDirPath());
|
||||
if (hasSourceHFileArchiveDirPath()) {
|
||||
result = result && getSourceHFileArchiveDirPath()
|
||||
.equals(other.getSourceHFileArchiveDirPath());
|
||||
}
|
||||
result = result &&
|
||||
getUnknownFields().equals(other.getUnknownFields());
|
||||
return result;
|
||||
|
@ -17120,6 +17349,18 @@ public final class AdminProtos {
|
|||
hash = (37 * hash) + ENTRY_FIELD_NUMBER;
|
||||
hash = (53 * hash) + getEntryList().hashCode();
|
||||
}
|
||||
if (hasReplicationClusterId()) {
|
||||
hash = (37 * hash) + REPLICATIONCLUSTERID_FIELD_NUMBER;
|
||||
hash = (53 * hash) + getReplicationClusterId().hashCode();
|
||||
}
|
||||
if (hasSourceBaseNamespaceDirPath()) {
|
||||
hash = (37 * hash) + SOURCEBASENAMESPACEDIRPATH_FIELD_NUMBER;
|
||||
hash = (53 * hash) + getSourceBaseNamespaceDirPath().hashCode();
|
||||
}
|
||||
if (hasSourceHFileArchiveDirPath()) {
|
||||
hash = (37 * hash) + SOURCEHFILEARCHIVEDIRPATH_FIELD_NUMBER;
|
||||
hash = (53 * hash) + getSourceHFileArchiveDirPath().hashCode();
|
||||
}
|
||||
hash = (29 * hash) + getUnknownFields().hashCode();
|
||||
memoizedHashCode = hash;
|
||||
return hash;
|
||||
|
@ -17243,6 +17484,12 @@ public final class AdminProtos {
|
|||
} else {
|
||||
entryBuilder_.clear();
|
||||
}
|
||||
replicationClusterId_ = "";
|
||||
bitField0_ = (bitField0_ & ~0x00000002);
|
||||
sourceBaseNamespaceDirPath_ = "";
|
||||
bitField0_ = (bitField0_ & ~0x00000004);
|
||||
sourceHFileArchiveDirPath_ = "";
|
||||
bitField0_ = (bitField0_ & ~0x00000008);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -17270,6 +17517,7 @@ public final class AdminProtos {
|
|||
public org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest buildPartial() {
|
||||
org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest result = new org.apache.hadoop.hbase.protobuf.generated.AdminProtos.ReplicateWALEntryRequest(this);
|
||||
int from_bitField0_ = bitField0_;
|
||||
int to_bitField0_ = 0;
|
||||
if (entryBuilder_ == null) {
|
||||
if (((bitField0_ & 0x00000001) == 0x00000001)) {
|
||||
entry_ = java.util.Collections.unmodifiableList(entry_);
|
||||
|
@ -17279,6 +17527,19 @@ public final class AdminProtos {
|
|||
} else {
|
||||
result.entry_ = entryBuilder_.build();
|
||||
}
|
||||
if (((from_bitField0_ & 0x00000002) == 0x00000002)) {
|
||||
to_bitField0_ |= 0x00000001;
|
||||
}
|
||||
result.replicationClusterId_ = replicationClusterId_;
|
||||
if (((from_bitField0_ & 0x00000004) == 0x00000004)) {
|
||||
to_bitField0_ |= 0x00000002;
|
||||
}
|
||||
result.sourceBaseNamespaceDirPath_ = sourceBaseNamespaceDirPath_;
|
||||
if (((from_bitField0_ & 0x00000008) == 0x00000008)) {
|
||||
to_bitField0_ |= 0x00000004;
|
||||
}
|
||||
result.sourceHFileArchiveDirPath_ = sourceHFileArchiveDirPath_;
|
||||
result.bitField0_ = to_bitField0_;
|
||||
onBuilt();
|
||||
return result;
|
||||
}
|
||||
|
@ -17320,6 +17581,21 @@ public final class AdminProtos {
|
|||
}
|
||||
}
|
||||
}
|
||||
if (other.hasReplicationClusterId()) {
|
||||
bitField0_ |= 0x00000002;
|
||||
replicationClusterId_ = other.replicationClusterId_;
|
||||
onChanged();
|
||||
}
|
||||
if (other.hasSourceBaseNamespaceDirPath()) {
|
||||
bitField0_ |= 0x00000004;
|
||||
sourceBaseNamespaceDirPath_ = other.sourceBaseNamespaceDirPath_;
|
||||
onChanged();
|
||||
}
|
||||
if (other.hasSourceHFileArchiveDirPath()) {
|
||||
bitField0_ |= 0x00000008;
|
||||
sourceHFileArchiveDirPath_ = other.sourceHFileArchiveDirPath_;
|
||||
onChanged();
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
@ -17593,6 +17869,228 @@ public final class AdminProtos {
|
|||
return entryBuilder_;
|
||||
}
|
||||
|
||||
// optional string replicationClusterId = 2;
|
||||
private java.lang.Object replicationClusterId_ = "";
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public boolean hasReplicationClusterId() {
|
||||
return ((bitField0_ & 0x00000002) == 0x00000002);
|
||||
}
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public java.lang.String getReplicationClusterId() {
|
||||
java.lang.Object ref = replicationClusterId_;
|
||||
if (!(ref instanceof java.lang.String)) {
|
||||
java.lang.String s = ((com.google.protobuf.ByteString) ref)
|
||||
.toStringUtf8();
|
||||
replicationClusterId_ = s;
|
||||
return s;
|
||||
} else {
|
||||
return (java.lang.String) ref;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public com.google.protobuf.ByteString
|
||||
getReplicationClusterIdBytes() {
|
||||
java.lang.Object ref = replicationClusterId_;
|
||||
if (ref instanceof String) {
|
||||
com.google.protobuf.ByteString b =
|
||||
com.google.protobuf.ByteString.copyFromUtf8(
|
||||
(java.lang.String) ref);
|
||||
replicationClusterId_ = b;
|
||||
return b;
|
||||
} else {
|
||||
return (com.google.protobuf.ByteString) ref;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public Builder setReplicationClusterId(
|
||||
java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
bitField0_ |= 0x00000002;
|
||||
replicationClusterId_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public Builder clearReplicationClusterId() {
|
||||
bitField0_ = (bitField0_ & ~0x00000002);
|
||||
replicationClusterId_ = getDefaultInstance().getReplicationClusterId();
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional string replicationClusterId = 2;</code>
|
||||
*/
|
||||
public Builder setReplicationClusterIdBytes(
|
||||
com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
bitField0_ |= 0x00000002;
|
||||
replicationClusterId_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string sourceBaseNamespaceDirPath = 3;
|
||||
private java.lang.Object sourceBaseNamespaceDirPath_ = "";
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public boolean hasSourceBaseNamespaceDirPath() {
|
||||
return ((bitField0_ & 0x00000004) == 0x00000004);
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public java.lang.String getSourceBaseNamespaceDirPath() {
|
||||
java.lang.Object ref = sourceBaseNamespaceDirPath_;
|
||||
if (!(ref instanceof java.lang.String)) {
|
||||
java.lang.String s = ((com.google.protobuf.ByteString) ref)
|
||||
.toStringUtf8();
|
||||
sourceBaseNamespaceDirPath_ = s;
|
||||
return s;
|
||||
} else {
|
||||
return (java.lang.String) ref;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public com.google.protobuf.ByteString
|
||||
getSourceBaseNamespaceDirPathBytes() {
|
||||
java.lang.Object ref = sourceBaseNamespaceDirPath_;
|
||||
if (ref instanceof String) {
|
||||
com.google.protobuf.ByteString b =
|
||||
com.google.protobuf.ByteString.copyFromUtf8(
|
||||
(java.lang.String) ref);
|
||||
sourceBaseNamespaceDirPath_ = b;
|
||||
return b;
|
||||
} else {
|
||||
return (com.google.protobuf.ByteString) ref;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public Builder setSourceBaseNamespaceDirPath(
|
||||
java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
bitField0_ |= 0x00000004;
|
||||
sourceBaseNamespaceDirPath_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public Builder clearSourceBaseNamespaceDirPath() {
|
||||
bitField0_ = (bitField0_ & ~0x00000004);
|
||||
sourceBaseNamespaceDirPath_ = getDefaultInstance().getSourceBaseNamespaceDirPath();
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceBaseNamespaceDirPath = 3;</code>
|
||||
*/
|
||||
public Builder setSourceBaseNamespaceDirPathBytes(
|
||||
com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
bitField0_ |= 0x00000004;
|
||||
sourceBaseNamespaceDirPath_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional string sourceHFileArchiveDirPath = 4;
|
||||
private java.lang.Object sourceHFileArchiveDirPath_ = "";
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public boolean hasSourceHFileArchiveDirPath() {
|
||||
return ((bitField0_ & 0x00000008) == 0x00000008);
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public java.lang.String getSourceHFileArchiveDirPath() {
|
||||
java.lang.Object ref = sourceHFileArchiveDirPath_;
|
||||
if (!(ref instanceof java.lang.String)) {
|
||||
java.lang.String s = ((com.google.protobuf.ByteString) ref)
|
||||
.toStringUtf8();
|
||||
sourceHFileArchiveDirPath_ = s;
|
||||
return s;
|
||||
} else {
|
||||
return (java.lang.String) ref;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public com.google.protobuf.ByteString
|
||||
getSourceHFileArchiveDirPathBytes() {
|
||||
java.lang.Object ref = sourceHFileArchiveDirPath_;
|
||||
if (ref instanceof String) {
|
||||
com.google.protobuf.ByteString b =
|
||||
com.google.protobuf.ByteString.copyFromUtf8(
|
||||
(java.lang.String) ref);
|
||||
sourceHFileArchiveDirPath_ = b;
|
||||
return b;
|
||||
} else {
|
||||
return (com.google.protobuf.ByteString) ref;
|
||||
}
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public Builder setSourceHFileArchiveDirPath(
|
||||
java.lang.String value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
bitField0_ |= 0x00000008;
|
||||
sourceHFileArchiveDirPath_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public Builder clearSourceHFileArchiveDirPath() {
|
||||
bitField0_ = (bitField0_ & ~0x00000008);
|
||||
sourceHFileArchiveDirPath_ = getDefaultInstance().getSourceHFileArchiveDirPath();
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional string sourceHFileArchiveDirPath = 4;</code>
|
||||
*/
|
||||
public Builder setSourceHFileArchiveDirPathBytes(
|
||||
com.google.protobuf.ByteString value) {
|
||||
if (value == null) {
|
||||
throw new NullPointerException();
|
||||
}
|
||||
bitField0_ |= 0x00000008;
|
||||
sourceHFileArchiveDirPath_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
|
||||
// @@protoc_insertion_point(builder_scope:hbase.pb.ReplicateWALEntryRequest)
|
||||
}
|
||||
|
||||
|
@ -23539,56 +24037,58 @@ public final class AdminProtos {
|
|||
"ster_system_time\030\004 \001(\004\"\026\n\024MergeRegionsRe" +
|
||||
"sponse\"a\n\010WALEntry\022\035\n\003key\030\001 \002(\0132\020.hbase." +
|
||||
"pb.WALKey\022\027\n\017key_value_bytes\030\002 \003(\014\022\035\n\025as",
|
||||
"sociated_cell_count\030\003 \001(\005\"=\n\030ReplicateWA" +
|
||||
"LEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb." +
|
||||
"WALEntry\"\033\n\031ReplicateWALEntryResponse\"\026\n" +
|
||||
"\024RollWALWriterRequest\"0\n\025RollWALWriterRe" +
|
||||
"sponse\022\027\n\017region_to_flush\030\001 \003(\014\"#\n\021StopS" +
|
||||
"erverRequest\022\016\n\006reason\030\001 \002(\t\"\024\n\022StopServ" +
|
||||
"erResponse\"\026\n\024GetServerInfoRequest\"K\n\nSe" +
|
||||
"rverInfo\022)\n\013server_name\030\001 \002(\0132\024.hbase.pb" +
|
||||
".ServerName\022\022\n\nwebui_port\030\002 \001(\r\"B\n\025GetSe" +
|
||||
"rverInfoResponse\022)\n\013server_info\030\001 \002(\0132\024.",
|
||||
"hbase.pb.ServerInfo\"\034\n\032UpdateConfigurati" +
|
||||
"onRequest\"\035\n\033UpdateConfigurationResponse" +
|
||||
"2\207\013\n\014AdminService\022P\n\rGetRegionInfo\022\036.hba" +
|
||||
"se.pb.GetRegionInfoRequest\032\037.hbase.pb.Ge" +
|
||||
"tRegionInfoResponse\022M\n\014GetStoreFile\022\035.hb" +
|
||||
"ase.pb.GetStoreFileRequest\032\036.hbase.pb.Ge" +
|
||||
"tStoreFileResponse\022V\n\017GetOnlineRegion\022 ." +
|
||||
"hbase.pb.GetOnlineRegionRequest\032!.hbase." +
|
||||
"pb.GetOnlineRegionResponse\022G\n\nOpenRegion" +
|
||||
"\022\033.hbase.pb.OpenRegionRequest\032\034.hbase.pb",
|
||||
".OpenRegionResponse\022M\n\014WarmupRegion\022\035.hb" +
|
||||
"ase.pb.WarmupRegionRequest\032\036.hbase.pb.Wa" +
|
||||
"rmupRegionResponse\022J\n\013CloseRegion\022\034.hbas" +
|
||||
"e.pb.CloseRegionRequest\032\035.hbase.pb.Close" +
|
||||
"RegionResponse\022J\n\013FlushRegion\022\034.hbase.pb" +
|
||||
".FlushRegionRequest\032\035.hbase.pb.FlushRegi" +
|
||||
"onResponse\022J\n\013SplitRegion\022\034.hbase.pb.Spl" +
|
||||
"itRegionRequest\032\035.hbase.pb.SplitRegionRe" +
|
||||
"sponse\022P\n\rCompactRegion\022\036.hbase.pb.Compa" +
|
||||
"ctRegionRequest\032\037.hbase.pb.CompactRegion",
|
||||
"Response\022M\n\014MergeRegions\022\035.hbase.pb.Merg" +
|
||||
"eRegionsRequest\032\036.hbase.pb.MergeRegionsR" +
|
||||
"esponse\022\\\n\021ReplicateWALEntry\022\".hbase.pb." +
|
||||
"ReplicateWALEntryRequest\032#.hbase.pb.Repl" +
|
||||
"icateWALEntryResponse\022Q\n\006Replay\022\".hbase." +
|
||||
"pb.ReplicateWALEntryRequest\032#.hbase.pb.R" +
|
||||
"eplicateWALEntryResponse\022P\n\rRollWALWrite" +
|
||||
"r\022\036.hbase.pb.RollWALWriterRequest\032\037.hbas" +
|
||||
"e.pb.RollWALWriterResponse\022P\n\rGetServerI" +
|
||||
"nfo\022\036.hbase.pb.GetServerInfoRequest\032\037.hb",
|
||||
"ase.pb.GetServerInfoResponse\022G\n\nStopServ" +
|
||||
"er\022\033.hbase.pb.StopServerRequest\032\034.hbase." +
|
||||
"pb.StopServerResponse\022_\n\022UpdateFavoredNo" +
|
||||
"des\022#.hbase.pb.UpdateFavoredNodesRequest" +
|
||||
"\032$.hbase.pb.UpdateFavoredNodesResponse\022b" +
|
||||
"\n\023UpdateConfiguration\022$.hbase.pb.UpdateC" +
|
||||
"onfigurationRequest\032%.hbase.pb.UpdateCon" +
|
||||
"figurationResponseBA\n*org.apache.hadoop." +
|
||||
"hbase.protobuf.generatedB\013AdminProtosH\001\210" +
|
||||
"\001\001\240\001\001"
|
||||
"sociated_cell_count\030\003 \001(\005\"\242\001\n\030ReplicateW" +
|
||||
"ALEntryRequest\022!\n\005entry\030\001 \003(\0132\022.hbase.pb" +
|
||||
".WALEntry\022\034\n\024replicationClusterId\030\002 \001(\t\022" +
|
||||
"\"\n\032sourceBaseNamespaceDirPath\030\003 \001(\t\022!\n\031s" +
|
||||
"ourceHFileArchiveDirPath\030\004 \001(\t\"\033\n\031Replic" +
|
||||
"ateWALEntryResponse\"\026\n\024RollWALWriterRequ" +
|
||||
"est\"0\n\025RollWALWriterResponse\022\027\n\017region_t" +
|
||||
"o_flush\030\001 \003(\014\"#\n\021StopServerRequest\022\016\n\006re" +
|
||||
"ason\030\001 \002(\t\"\024\n\022StopServerResponse\"\026\n\024GetS" +
|
||||
"erverInfoRequest\"K\n\nServerInfo\022)\n\013server",
|
||||
"_name\030\001 \002(\0132\024.hbase.pb.ServerName\022\022\n\nweb" +
|
||||
"ui_port\030\002 \001(\r\"B\n\025GetServerInfoResponse\022)" +
|
||||
"\n\013server_info\030\001 \002(\0132\024.hbase.pb.ServerInf" +
|
||||
"o\"\034\n\032UpdateConfigurationRequest\"\035\n\033Updat" +
|
||||
"eConfigurationResponse2\207\013\n\014AdminService\022" +
|
||||
"P\n\rGetRegionInfo\022\036.hbase.pb.GetRegionInf" +
|
||||
"oRequest\032\037.hbase.pb.GetRegionInfoRespons" +
|
||||
"e\022M\n\014GetStoreFile\022\035.hbase.pb.GetStoreFil" +
|
||||
"eRequest\032\036.hbase.pb.GetStoreFileResponse" +
|
||||
"\022V\n\017GetOnlineRegion\022 .hbase.pb.GetOnline",
|
||||
"RegionRequest\032!.hbase.pb.GetOnlineRegion" +
|
||||
"Response\022G\n\nOpenRegion\022\033.hbase.pb.OpenRe" +
|
||||
"gionRequest\032\034.hbase.pb.OpenRegionRespons" +
|
||||
"e\022M\n\014WarmupRegion\022\035.hbase.pb.WarmupRegio" +
|
||||
"nRequest\032\036.hbase.pb.WarmupRegionResponse" +
|
||||
"\022J\n\013CloseRegion\022\034.hbase.pb.CloseRegionRe" +
|
||||
"quest\032\035.hbase.pb.CloseRegionResponse\022J\n\013" +
|
||||
"FlushRegion\022\034.hbase.pb.FlushRegionReques" +
|
||||
"t\032\035.hbase.pb.FlushRegionResponse\022J\n\013Spli" +
|
||||
"tRegion\022\034.hbase.pb.SplitRegionRequest\032\035.",
|
||||
"hbase.pb.SplitRegionResponse\022P\n\rCompactR" +
|
||||
"egion\022\036.hbase.pb.CompactRegionRequest\032\037." +
|
||||
"hbase.pb.CompactRegionResponse\022M\n\014MergeR" +
|
||||
"egions\022\035.hbase.pb.MergeRegionsRequest\032\036." +
|
||||
"hbase.pb.MergeRegionsResponse\022\\\n\021Replica" +
|
||||
"teWALEntry\022\".hbase.pb.ReplicateWALEntryR" +
|
||||
"equest\032#.hbase.pb.ReplicateWALEntryRespo" +
|
||||
"nse\022Q\n\006Replay\022\".hbase.pb.ReplicateWALEnt" +
|
||||
"ryRequest\032#.hbase.pb.ReplicateWALEntryRe" +
|
||||
"sponse\022P\n\rRollWALWriter\022\036.hbase.pb.RollW",
|
||||
"ALWriterRequest\032\037.hbase.pb.RollWALWriter" +
|
||||
"Response\022P\n\rGetServerInfo\022\036.hbase.pb.Get" +
|
||||
"ServerInfoRequest\032\037.hbase.pb.GetServerIn" +
|
||||
"foResponse\022G\n\nStopServer\022\033.hbase.pb.Stop" +
|
||||
"ServerRequest\032\034.hbase.pb.StopServerRespo" +
|
||||
"nse\022_\n\022UpdateFavoredNodes\022#.hbase.pb.Upd" +
|
||||
"ateFavoredNodesRequest\032$.hbase.pb.Update" +
|
||||
"FavoredNodesResponse\022b\n\023UpdateConfigurat" +
|
||||
"ion\022$.hbase.pb.UpdateConfigurationReques" +
|
||||
"t\032%.hbase.pb.UpdateConfigurationResponse",
|
||||
"BA\n*org.apache.hadoop.hbase.protobuf.gen" +
|
||||
"eratedB\013AdminProtosH\001\210\001\001\240\001\001"
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
|
@ -23750,7 +24250,7 @@ public final class AdminProtos {
|
|||
internal_static_hbase_pb_ReplicateWALEntryRequest_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_hbase_pb_ReplicateWALEntryRequest_descriptor,
|
||||
new java.lang.String[] { "Entry", });
|
||||
new java.lang.String[] { "Entry", "ReplicationClusterId", "SourceBaseNamespaceDirPath", "SourceHFileArchiveDirPath", });
|
||||
internal_static_hbase_pb_ReplicateWALEntryResponse_descriptor =
|
||||
getDescriptor().getMessageTypes().get(24);
|
||||
internal_static_hbase_pb_ReplicateWALEntryResponse_fieldAccessorTable = new
|
||||
|
|
|
@ -17303,6 +17303,16 @@ public final class ClientProtos {
|
|||
* <code>optional bool track_scan_metrics = 9;</code>
|
||||
*/
|
||||
boolean getTrackScanMetrics();
|
||||
|
||||
// optional bool renew = 10 [default = false];
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
boolean hasRenew();
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
boolean getRenew();
|
||||
}
|
||||
/**
|
||||
* Protobuf type {@code hbase.pb.ScanRequest}
|
||||
|
@ -17429,6 +17439,11 @@ public final class ClientProtos {
|
|||
trackScanMetrics_ = input.readBool();
|
||||
break;
|
||||
}
|
||||
case 80: {
|
||||
bitField0_ |= 0x00000200;
|
||||
renew_ = input.readBool();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
|
||||
|
@ -17625,6 +17640,22 @@ public final class ClientProtos {
|
|||
return trackScanMetrics_;
|
||||
}
|
||||
|
||||
// optional bool renew = 10 [default = false];
|
||||
public static final int RENEW_FIELD_NUMBER = 10;
|
||||
private boolean renew_;
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
public boolean hasRenew() {
|
||||
return ((bitField0_ & 0x00000200) == 0x00000200);
|
||||
}
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
public boolean getRenew() {
|
||||
return renew_;
|
||||
}
|
||||
|
||||
private void initFields() {
|
||||
region_ = org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.RegionSpecifier.getDefaultInstance();
|
||||
scan_ = org.apache.hadoop.hbase.protobuf.generated.ClientProtos.Scan.getDefaultInstance();
|
||||
|
@ -17635,6 +17666,7 @@ public final class ClientProtos {
|
|||
clientHandlesPartials_ = false;
|
||||
clientHandlesHeartbeats_ = false;
|
||||
trackScanMetrics_ = false;
|
||||
renew_ = false;
|
||||
}
|
||||
private byte memoizedIsInitialized = -1;
|
||||
public final boolean isInitialized() {
|
||||
|
@ -17687,6 +17719,9 @@ public final class ClientProtos {
|
|||
if (((bitField0_ & 0x00000100) == 0x00000100)) {
|
||||
output.writeBool(9, trackScanMetrics_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000200) == 0x00000200)) {
|
||||
output.writeBool(10, renew_);
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
|
@ -17732,6 +17767,10 @@ public final class ClientProtos {
|
|||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBoolSize(9, trackScanMetrics_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000200) == 0x00000200)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBoolSize(10, renew_);
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
|
@ -17800,6 +17839,11 @@ public final class ClientProtos {
|
|||
result = result && (getTrackScanMetrics()
|
||||
== other.getTrackScanMetrics());
|
||||
}
|
||||
result = result && (hasRenew() == other.hasRenew());
|
||||
if (hasRenew()) {
|
||||
result = result && (getRenew()
|
||||
== other.getRenew());
|
||||
}
|
||||
result = result &&
|
||||
getUnknownFields().equals(other.getUnknownFields());
|
||||
return result;
|
||||
|
@ -17849,6 +17893,10 @@ public final class ClientProtos {
|
|||
hash = (37 * hash) + TRACK_SCAN_METRICS_FIELD_NUMBER;
|
||||
hash = (53 * hash) + hashBoolean(getTrackScanMetrics());
|
||||
}
|
||||
if (hasRenew()) {
|
||||
hash = (37 * hash) + RENEW_FIELD_NUMBER;
|
||||
hash = (53 * hash) + hashBoolean(getRenew());
|
||||
}
|
||||
hash = (29 * hash) + getUnknownFields().hashCode();
|
||||
memoizedHashCode = hash;
|
||||
return hash;
|
||||
|
@ -17999,6 +18047,8 @@ public final class ClientProtos {
|
|||
bitField0_ = (bitField0_ & ~0x00000080);
|
||||
trackScanMetrics_ = false;
|
||||
bitField0_ = (bitField0_ & ~0x00000100);
|
||||
renew_ = false;
|
||||
bitField0_ = (bitField0_ & ~0x00000200);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -18071,6 +18121,10 @@ public final class ClientProtos {
|
|||
to_bitField0_ |= 0x00000100;
|
||||
}
|
||||
result.trackScanMetrics_ = trackScanMetrics_;
|
||||
if (((from_bitField0_ & 0x00000200) == 0x00000200)) {
|
||||
to_bitField0_ |= 0x00000200;
|
||||
}
|
||||
result.renew_ = renew_;
|
||||
result.bitField0_ = to_bitField0_;
|
||||
onBuilt();
|
||||
return result;
|
||||
|
@ -18114,6 +18168,9 @@ public final class ClientProtos {
|
|||
if (other.hasTrackScanMetrics()) {
|
||||
setTrackScanMetrics(other.getTrackScanMetrics());
|
||||
}
|
||||
if (other.hasRenew()) {
|
||||
setRenew(other.getRenew());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
@ -18618,6 +18675,39 @@ public final class ClientProtos {
|
|||
return this;
|
||||
}
|
||||
|
||||
// optional bool renew = 10 [default = false];
|
||||
private boolean renew_ ;
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
public boolean hasRenew() {
|
||||
return ((bitField0_ & 0x00000200) == 0x00000200);
|
||||
}
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
public boolean getRenew() {
|
||||
return renew_;
|
||||
}
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
public Builder setRenew(boolean value) {
|
||||
bitField0_ |= 0x00000200;
|
||||
renew_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional bool renew = 10 [default = false];</code>
|
||||
*/
|
||||
public Builder clearRenew() {
|
||||
bitField0_ = (bitField0_ & ~0x00000200);
|
||||
renew_ = false;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
|
||||
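
The new optional renew flag lets a client send a ScanRequest whose only purpose is to renew the scanner lease on the server. A hedged sketch using the generated builder from this patch; the scanner id is a placeholder, since a real client would reuse the id returned by a previous ScanResponse.

import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.ScanRequest;

public class RenewScannerExample {
  public static void main(String[] args) {
    long scannerId = 42L;   // placeholder: normally taken from a prior ScanResponse

    ScanRequest renewRequest = ScanRequest.newBuilder()
        .setScannerId(scannerId)
        .setNumberOfRows(0)     // ask for no rows; this call only renews the lease
        .setCloseScanner(false)
        .setRenew(true)         // new field from this patch
        .build();

    System.out.println(renewRequest.getRenew());   // true
  }
}
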
// @@protoc_insertion_point(builder_scope:hbase.pb.ScanRequest)
|
||||
}
|
||||
|
||||
|
@ -34289,76 +34379,76 @@ public final class ClientProtos {
|
|||
"\025.hbase.pb.Consistency:\006STRONG\022\017\n\007cachin" +
|
||||
"g\030\021 \001(\r\022\035\n\025allow_partial_results\030\022 \001(\010\0226" +
|
||||
"\n\rcf_time_range\030\023 \003(\0132\037.hbase.pb.ColumnF" +
|
||||
"amilyTimeRange\"\220\002\n\013ScanRequest\022)\n\006region",
|
||||
"amilyTimeRange\"\246\002\n\013ScanRequest\022)\n\006region",
|
||||
"\030\001 \001(\0132\031.hbase.pb.RegionSpecifier\022\034\n\004sca" +
|
||||
"n\030\002 \001(\0132\016.hbase.pb.Scan\022\022\n\nscanner_id\030\003 " +
|
||||
"\001(\004\022\026\n\016number_of_rows\030\004 \001(\r\022\025\n\rclose_sca" +
|
||||
"nner\030\005 \001(\010\022\025\n\rnext_call_seq\030\006 \001(\004\022\037\n\027cli" +
|
||||
"ent_handles_partials\030\007 \001(\010\022!\n\031client_han" +
|
||||
"dles_heartbeats\030\010 \001(\010\022\032\n\022track_scan_metr" +
|
||||
"ics\030\t \001(\010\"\232\002\n\014ScanResponse\022\030\n\020cells_per_" +
|
||||
"result\030\001 \003(\r\022\022\n\nscanner_id\030\002 \001(\004\022\024\n\014more" +
|
||||
"_results\030\003 \001(\010\022\013\n\003ttl\030\004 \001(\r\022!\n\007results\030\005" +
|
||||
" \003(\0132\020.hbase.pb.Result\022\r\n\005stale\030\006 \001(\010\022\037\n",
|
||||
"\027partial_flag_per_result\030\007 \003(\010\022\036\n\026more_r" +
|
||||
"esults_in_region\030\010 \001(\010\022\031\n\021heartbeat_mess" +
|
||||
"age\030\t \001(\010\022+\n\014scan_metrics\030\n \001(\0132\025.hbase." +
|
||||
"pb.ScanMetrics\"\305\001\n\024BulkLoadHFileRequest\022" +
|
||||
")\n\006region\030\001 \002(\0132\031.hbase.pb.RegionSpecifi" +
|
||||
"er\022>\n\013family_path\030\002 \003(\0132).hbase.pb.BulkL" +
|
||||
"oadHFileRequest.FamilyPath\022\026\n\016assign_seq" +
|
||||
"_num\030\003 \001(\010\032*\n\nFamilyPath\022\016\n\006family\030\001 \002(\014" +
|
||||
"\022\014\n\004path\030\002 \002(\t\"\'\n\025BulkLoadHFileResponse\022" +
|
||||
"\016\n\006loaded\030\001 \002(\010\"a\n\026CoprocessorServiceCal",
|
||||
"l\022\013\n\003row\030\001 \002(\014\022\024\n\014service_name\030\002 \002(\t\022\023\n\013" +
|
||||
"method_name\030\003 \002(\t\022\017\n\007request\030\004 \002(\014\"B\n\030Co" +
|
||||
"processorServiceResult\022&\n\005value\030\001 \001(\0132\027." +
|
||||
"hbase.pb.NameBytesPair\"v\n\031CoprocessorSer" +
|
||||
"viceRequest\022)\n\006region\030\001 \002(\0132\031.hbase.pb.R" +
|
||||
"egionSpecifier\022.\n\004call\030\002 \002(\0132 .hbase.pb." +
|
||||
"CoprocessorServiceCall\"o\n\032CoprocessorSer" +
|
||||
"viceResponse\022)\n\006region\030\001 \002(\0132\031.hbase.pb." +
|
||||
"RegionSpecifier\022&\n\005value\030\002 \002(\0132\027.hbase.p" +
|
||||
"b.NameBytesPair\"\226\001\n\006Action\022\r\n\005index\030\001 \001(",
|
||||
"\r\022)\n\010mutation\030\002 \001(\0132\027.hbase.pb.MutationP" +
|
||||
"roto\022\032\n\003get\030\003 \001(\0132\r.hbase.pb.Get\0226\n\014serv" +
|
||||
"ice_call\030\004 \001(\0132 .hbase.pb.CoprocessorSer" +
|
||||
"viceCall\"k\n\014RegionAction\022)\n\006region\030\001 \002(\013" +
|
||||
"2\031.hbase.pb.RegionSpecifier\022\016\n\006atomic\030\002 " +
|
||||
"\001(\010\022 \n\006action\030\003 \003(\0132\020.hbase.pb.Action\"c\n" +
|
||||
"\017RegionLoadStats\022\027\n\014memstoreLoad\030\001 \001(\005:\001" +
|
||||
"0\022\030\n\rheapOccupancy\030\002 \001(\005:\0010\022\035\n\022compactio" +
|
||||
"nPressure\030\003 \001(\005:\0010\"\332\001\n\021ResultOrException" +
|
||||
"\022\r\n\005index\030\001 \001(\r\022 \n\006result\030\002 \001(\0132\020.hbase.",
|
||||
"pb.Result\022*\n\texception\030\003 \001(\0132\027.hbase.pb." +
|
||||
"NameBytesPair\022:\n\016service_result\030\004 \001(\0132\"." +
|
||||
"hbase.pb.CoprocessorServiceResult\022,\n\tloa" +
|
||||
"dStats\030\005 \001(\0132\031.hbase.pb.RegionLoadStats\"" +
|
||||
"x\n\022RegionActionResult\0226\n\021resultOrExcepti" +
|
||||
"on\030\001 \003(\0132\033.hbase.pb.ResultOrException\022*\n" +
|
||||
"\texception\030\002 \001(\0132\027.hbase.pb.NameBytesPai" +
|
||||
"r\"x\n\014MultiRequest\022,\n\014regionAction\030\001 \003(\0132" +
|
||||
"\026.hbase.pb.RegionAction\022\022\n\nnonceGroup\030\002 " +
|
||||
"\001(\004\022&\n\tcondition\030\003 \001(\0132\023.hbase.pb.Condit",
|
||||
"ion\"\\\n\rMultiResponse\0228\n\022regionActionResu" +
|
||||
"lt\030\001 \003(\0132\034.hbase.pb.RegionActionResult\022\021" +
|
||||
"\n\tprocessed\030\002 \001(\010*\'\n\013Consistency\022\n\n\006STRO" +
|
||||
"NG\020\000\022\014\n\010TIMELINE\020\0012\203\004\n\rClientService\0222\n\003" +
|
||||
"Get\022\024.hbase.pb.GetRequest\032\025.hbase.pb.Get" +
|
||||
"Response\022;\n\006Mutate\022\027.hbase.pb.MutateRequ" +
|
||||
"est\032\030.hbase.pb.MutateResponse\0225\n\004Scan\022\025." +
|
||||
"hbase.pb.ScanRequest\032\026.hbase.pb.ScanResp" +
|
||||
"onse\022P\n\rBulkLoadHFile\022\036.hbase.pb.BulkLoa" +
|
||||
"dHFileRequest\032\037.hbase.pb.BulkLoadHFileRe",
|
||||
"sponse\022X\n\013ExecService\022#.hbase.pb.Coproce" +
|
||||
"ssorServiceRequest\032$.hbase.pb.Coprocesso" +
|
||||
"rServiceResponse\022d\n\027ExecRegionServerServ" +
|
||||
"ics\030\t \001(\010\022\024\n\005renew\030\n \001(\010:\005false\"\232\002\n\014Scan" +
|
||||
"Response\022\030\n\020cells_per_result\030\001 \003(\r\022\022\n\nsc" +
|
||||
"anner_id\030\002 \001(\004\022\024\n\014more_results\030\003 \001(\010\022\013\n\003" +
|
||||
"ttl\030\004 \001(\r\022!\n\007results\030\005 \003(\0132\020.hbase.pb.Re",
|
||||
"sult\022\r\n\005stale\030\006 \001(\010\022\037\n\027partial_flag_per_" +
|
||||
"result\030\007 \003(\010\022\036\n\026more_results_in_region\030\010" +
|
||||
" \001(\010\022\031\n\021heartbeat_message\030\t \001(\010\022+\n\014scan_" +
|
||||
"metrics\030\n \001(\0132\025.hbase.pb.ScanMetrics\"\305\001\n" +
|
||||
"\024BulkLoadHFileRequest\022)\n\006region\030\001 \002(\0132\031." +
|
||||
"hbase.pb.RegionSpecifier\022>\n\013family_path\030" +
|
||||
"\002 \003(\0132).hbase.pb.BulkLoadHFileRequest.Fa" +
|
||||
"milyPath\022\026\n\016assign_seq_num\030\003 \001(\010\032*\n\nFami" +
|
||||
"lyPath\022\016\n\006family\030\001 \002(\014\022\014\n\004path\030\002 \002(\t\"\'\n\025" +
|
||||
"BulkLoadHFileResponse\022\016\n\006loaded\030\001 \002(\010\"a\n",
|
||||
"\026CoprocessorServiceCall\022\013\n\003row\030\001 \002(\014\022\024\n\014" +
|
||||
"service_name\030\002 \002(\t\022\023\n\013method_name\030\003 \002(\t\022" +
|
||||
"\017\n\007request\030\004 \002(\014\"B\n\030CoprocessorServiceRe" +
|
||||
"sult\022&\n\005value\030\001 \001(\0132\027.hbase.pb.NameBytes" +
|
||||
"Pair\"v\n\031CoprocessorServiceRequest\022)\n\006reg" +
|
||||
"ion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022.\n\004" +
|
||||
"call\030\002 \002(\0132 .hbase.pb.CoprocessorService" +
|
||||
"Call\"o\n\032CoprocessorServiceResponse\022)\n\006re" +
|
||||
"gion\030\001 \002(\0132\031.hbase.pb.RegionSpecifier\022&\n" +
|
||||
"\005value\030\002 \002(\0132\027.hbase.pb.NameBytesPair\"\226\001",
|
||||
"\n\006Action\022\r\n\005index\030\001 \001(\r\022)\n\010mutation\030\002 \001(" +
|
||||
"\0132\027.hbase.pb.MutationProto\022\032\n\003get\030\003 \001(\0132" +
|
||||
"\r.hbase.pb.Get\0226\n\014service_call\030\004 \001(\0132 .h" +
|
||||
"base.pb.CoprocessorServiceCall\"k\n\014Region" +
|
||||
"Action\022)\n\006region\030\001 \002(\0132\031.hbase.pb.Region" +
|
||||
"Specifier\022\016\n\006atomic\030\002 \001(\010\022 \n\006action\030\003 \003(" +
|
||||
"\0132\020.hbase.pb.Action\"c\n\017RegionLoadStats\022\027" +
|
||||
"\n\014memstoreLoad\030\001 \001(\005:\0010\022\030\n\rheapOccupancy" +
|
||||
"\030\002 \001(\005:\0010\022\035\n\022compactionPressure\030\003 \001(\005:\0010" +
|
||||
"\"\332\001\n\021ResultOrException\022\r\n\005index\030\001 \001(\r\022 \n",
|
||||
"\006result\030\002 \001(\0132\020.hbase.pb.Result\022*\n\texcep" +
|
||||
"tion\030\003 \001(\0132\027.hbase.pb.NameBytesPair\022:\n\016s" +
|
||||
"ervice_result\030\004 \001(\0132\".hbase.pb.Coprocess" +
|
||||
"orServiceResult\022,\n\tloadStats\030\005 \001(\0132\031.hba" +
|
||||
"se.pb.RegionLoadStats\"x\n\022RegionActionRes" +
|
||||
"ult\0226\n\021resultOrException\030\001 \003(\0132\033.hbase.p" +
|
||||
"b.ResultOrException\022*\n\texception\030\002 \001(\0132\027" +
|
||||
".hbase.pb.NameBytesPair\"x\n\014MultiRequest\022" +
|
||||
",\n\014regionAction\030\001 \003(\0132\026.hbase.pb.RegionA" +
|
||||
"ction\022\022\n\nnonceGroup\030\002 \001(\004\022&\n\tcondition\030\003",
|
||||
" \001(\0132\023.hbase.pb.Condition\"\\\n\rMultiRespon" +
|
||||
"se\0228\n\022regionActionResult\030\001 \003(\0132\034.hbase.p" +
|
||||
"b.RegionActionResult\022\021\n\tprocessed\030\002 \001(\010*" +
|
||||
"\'\n\013Consistency\022\n\n\006STRONG\020\000\022\014\n\010TIMELINE\020\001" +
|
||||
"2\203\004\n\rClientService\0222\n\003Get\022\024.hbase.pb.Get" +
|
||||
"Request\032\025.hbase.pb.GetResponse\022;\n\006Mutate" +
|
||||
"\022\027.hbase.pb.MutateRequest\032\030.hbase.pb.Mut" +
|
||||
"ateResponse\0225\n\004Scan\022\025.hbase.pb.ScanReque" +
|
||||
"st\032\026.hbase.pb.ScanResponse\022P\n\rBulkLoadHF" +
|
||||
"ile\022\036.hbase.pb.BulkLoadHFileRequest\032\037.hb",
|
||||
"ase.pb.BulkLoadHFileResponse\022X\n\013ExecServ" +
|
||||
"ice\022#.hbase.pb.CoprocessorServiceRequest" +
|
||||
"\032$.hbase.pb.CoprocessorServiceResponse\0228" +
|
||||
"\n\005Multi\022\026.hbase.pb.MultiRequest\032\027.hbase." +
|
||||
"pb.MultiResponseBB\n*org.apache.hadoop.hb" +
|
||||
"ase.protobuf.generatedB\014ClientProtosH\001\210\001" +
|
||||
"\001\240\001\001"
|
||||
"\032$.hbase.pb.CoprocessorServiceResponse\022d" +
|
||||
"\n\027ExecRegionServerService\022#.hbase.pb.Cop" +
|
||||
"rocessorServiceRequest\032$.hbase.pb.Coproc" +
|
||||
"essorServiceResponse\0228\n\005Multi\022\026.hbase.pb" +
|
||||
".MultiRequest\032\027.hbase.pb.MultiResponseBB" +
|
||||
"\n*org.apache.hadoop.hbase.protobuf.gener" +
|
||||
"atedB\014ClientProtosH\001\210\001\001\240\001\001"
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
|
@ -34454,7 +34544,7 @@ public final class ClientProtos {
|
|||
internal_static_hbase_pb_ScanRequest_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_hbase_pb_ScanRequest_descriptor,
|
||||
new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", "ClientHandlesHeartbeats", "TrackScanMetrics", });
|
||||
new java.lang.String[] { "Region", "Scan", "ScannerId", "NumberOfRows", "CloseScanner", "NextCallSeq", "ClientHandlesPartials", "ClientHandlesHeartbeats", "TrackScanMetrics", "Renew", });
|
||||
internal_static_hbase_pb_ScanResponse_descriptor =
|
||||
getDescriptor().getMessageTypes().get(13);
|
||||
internal_static_hbase_pb_ScanResponse_fieldAccessorTable = new
|
||||
|
|
|
@ -16798,6 +16798,26 @@ public final class HBaseProtos {
|
|||
*/
|
||||
com.google.protobuf.ByteString
|
||||
getSrcChecksumBytes();
|
||||
|
||||
// optional uint32 version_major = 7;
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
boolean hasVersionMajor();
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
int getVersionMajor();
|
||||
|
||||
// optional uint32 version_minor = 8;
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
boolean hasVersionMinor();
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
int getVersionMinor();
|
||||
}
|
||||
/**
|
||||
* Protobuf type {@code hbase.pb.VersionInfo}
|
||||
|
@ -16884,6 +16904,16 @@ public final class HBaseProtos {
|
|||
srcChecksum_ = input.readBytes();
|
||||
break;
|
||||
}
|
||||
case 56: {
|
||||
bitField0_ |= 0x00000040;
|
||||
versionMajor_ = input.readUInt32();
|
||||
break;
|
||||
}
|
||||
case 64: {
|
||||
bitField0_ |= 0x00000080;
|
||||
versionMinor_ = input.readUInt32();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (com.google.protobuf.InvalidProtocolBufferException e) {
|
||||
|
@ -17182,6 +17212,38 @@ public final class HBaseProtos {
|
|||
}
|
||||
}
|
||||
|
||||
// optional uint32 version_major = 7;
|
||||
public static final int VERSION_MAJOR_FIELD_NUMBER = 7;
|
||||
private int versionMajor_;
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
public boolean hasVersionMajor() {
|
||||
return ((bitField0_ & 0x00000040) == 0x00000040);
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
public int getVersionMajor() {
|
||||
return versionMajor_;
|
||||
}
|
||||
|
||||
// optional uint32 version_minor = 8;
|
||||
public static final int VERSION_MINOR_FIELD_NUMBER = 8;
|
||||
private int versionMinor_;
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
public boolean hasVersionMinor() {
|
||||
return ((bitField0_ & 0x00000080) == 0x00000080);
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
public int getVersionMinor() {
|
||||
return versionMinor_;
|
||||
}
|
||||
|
||||
private void initFields() {
|
||||
version_ = "";
|
||||
url_ = "";
|
||||
|
@ -17189,6 +17251,8 @@ public final class HBaseProtos {
|
|||
user_ = "";
|
||||
date_ = "";
|
||||
srcChecksum_ = "";
|
||||
versionMajor_ = 0;
|
||||
versionMinor_ = 0;
|
||||
}
|
||||
private byte memoizedIsInitialized = -1;
|
||||
public final boolean isInitialized() {
|
||||
|
@ -17244,6 +17308,12 @@ public final class HBaseProtos {
|
|||
if (((bitField0_ & 0x00000020) == 0x00000020)) {
|
||||
output.writeBytes(6, getSrcChecksumBytes());
|
||||
}
|
||||
if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
||||
output.writeUInt32(7, versionMajor_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
||||
output.writeUInt32(8, versionMinor_);
|
||||
}
|
||||
getUnknownFields().writeTo(output);
|
||||
}
|
||||
|
||||
|
@ -17277,6 +17347,14 @@ public final class HBaseProtos {
|
|||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeBytesSize(6, getSrcChecksumBytes());
|
||||
}
|
||||
if (((bitField0_ & 0x00000040) == 0x00000040)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeUInt32Size(7, versionMajor_);
|
||||
}
|
||||
if (((bitField0_ & 0x00000080) == 0x00000080)) {
|
||||
size += com.google.protobuf.CodedOutputStream
|
||||
.computeUInt32Size(8, versionMinor_);
|
||||
}
|
||||
size += getUnknownFields().getSerializedSize();
|
||||
memoizedSerializedSize = size;
|
||||
return size;
|
||||
|
@ -17330,6 +17408,16 @@ public final class HBaseProtos {
|
|||
result = result && getSrcChecksum()
|
||||
.equals(other.getSrcChecksum());
|
||||
}
|
||||
result = result && (hasVersionMajor() == other.hasVersionMajor());
|
||||
if (hasVersionMajor()) {
|
||||
result = result && (getVersionMajor()
|
||||
== other.getVersionMajor());
|
||||
}
|
||||
result = result && (hasVersionMinor() == other.hasVersionMinor());
|
||||
if (hasVersionMinor()) {
|
||||
result = result && (getVersionMinor()
|
||||
== other.getVersionMinor());
|
||||
}
|
||||
result = result &&
|
||||
getUnknownFields().equals(other.getUnknownFields());
|
||||
return result;
|
||||
|
@ -17367,6 +17455,14 @@ public final class HBaseProtos {
|
|||
hash = (37 * hash) + SRC_CHECKSUM_FIELD_NUMBER;
|
||||
hash = (53 * hash) + getSrcChecksum().hashCode();
|
||||
}
|
||||
if (hasVersionMajor()) {
|
||||
hash = (37 * hash) + VERSION_MAJOR_FIELD_NUMBER;
|
||||
hash = (53 * hash) + getVersionMajor();
|
||||
}
|
||||
if (hasVersionMinor()) {
|
||||
hash = (37 * hash) + VERSION_MINOR_FIELD_NUMBER;
|
||||
hash = (53 * hash) + getVersionMinor();
|
||||
}
|
||||
hash = (29 * hash) + getUnknownFields().hashCode();
|
||||
memoizedHashCode = hash;
|
||||
return hash;
|
||||
|
@ -17492,6 +17588,10 @@ public final class HBaseProtos {
|
|||
bitField0_ = (bitField0_ & ~0x00000010);
|
||||
srcChecksum_ = "";
|
||||
bitField0_ = (bitField0_ & ~0x00000020);
|
||||
versionMajor_ = 0;
|
||||
bitField0_ = (bitField0_ & ~0x00000040);
|
||||
versionMinor_ = 0;
|
||||
bitField0_ = (bitField0_ & ~0x00000080);
|
||||
return this;
|
||||
}
|
||||
|
||||
|
@ -17544,6 +17644,14 @@ public final class HBaseProtos {
|
|||
to_bitField0_ |= 0x00000020;
|
||||
}
|
||||
result.srcChecksum_ = srcChecksum_;
|
||||
if (((from_bitField0_ & 0x00000040) == 0x00000040)) {
|
||||
to_bitField0_ |= 0x00000040;
|
||||
}
|
||||
result.versionMajor_ = versionMajor_;
|
||||
if (((from_bitField0_ & 0x00000080) == 0x00000080)) {
|
||||
to_bitField0_ |= 0x00000080;
|
||||
}
|
||||
result.versionMinor_ = versionMinor_;
|
||||
result.bitField0_ = to_bitField0_;
|
||||
onBuilt();
|
||||
return result;
|
||||
|
@ -17590,6 +17698,12 @@ public final class HBaseProtos {
|
|||
srcChecksum_ = other.srcChecksum_;
|
||||
onChanged();
|
||||
}
|
||||
if (other.hasVersionMajor()) {
|
||||
setVersionMajor(other.getVersionMajor());
|
||||
}
|
||||
if (other.hasVersionMinor()) {
|
||||
setVersionMinor(other.getVersionMinor());
|
||||
}
|
||||
this.mergeUnknownFields(other.getUnknownFields());
|
||||
return this;
|
||||
}
|
||||
|
@ -18085,6 +18199,72 @@ public final class HBaseProtos {
|
|||
return this;
|
||||
}
|
||||
|
||||
// optional uint32 version_major = 7;
|
||||
private int versionMajor_ ;
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
public boolean hasVersionMajor() {
|
||||
return ((bitField0_ & 0x00000040) == 0x00000040);
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
public int getVersionMajor() {
|
||||
return versionMajor_;
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
public Builder setVersionMajor(int value) {
|
||||
bitField0_ |= 0x00000040;
|
||||
versionMajor_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_major = 7;</code>
|
||||
*/
|
||||
public Builder clearVersionMajor() {
|
||||
bitField0_ = (bitField0_ & ~0x00000040);
|
||||
versionMajor_ = 0;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
|
||||
// optional uint32 version_minor = 8;
|
||||
private int versionMinor_ ;
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
public boolean hasVersionMinor() {
|
||||
return ((bitField0_ & 0x00000080) == 0x00000080);
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
public int getVersionMinor() {
|
||||
return versionMinor_;
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
public Builder setVersionMinor(int value) {
|
||||
bitField0_ |= 0x00000080;
|
||||
versionMinor_ = value;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
/**
|
||||
* <code>optional uint32 version_minor = 8;</code>
|
||||
*/
|
||||
public Builder clearVersionMinor() {
|
||||
bitField0_ = (bitField0_ & ~0x00000080);
|
||||
versionMinor_ = 0;
|
||||
onChanged();
|
||||
return this;
|
||||
}
|
||||
|
||||
// @@protoc_insertion_point(builder_scope:hbase.pb.VersionInfo)
|
||||
}
|
||||
|
||||
|
@ -18927,20 +19107,21 @@ public final class HBaseProtos {
|
|||
"al_msg\030\001 \002(\014\"5\n\004UUID\022\026\n\016least_sig_bits\030\001" +
|
||||
" \002(\004\022\025\n\rmost_sig_bits\030\002 \002(\004\"T\n\023Namespace" +
|
||||
"Descriptor\022\014\n\004name\030\001 \002(\014\022/\n\rconfiguratio" +
|
||||
"n\030\002 \003(\0132\030.hbase.pb.NameStringPair\"o\n\013Ver" +
|
||||
"sionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022\020" +
|
||||
"\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004date\030\005" +
|
||||
" \002(\t\022\024\n\014src_checksum\030\006 \002(\t\"Q\n\020RegionServ" +
|
||||
"erInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014version_info",
|
||||
"\030\002 \001(\0132\025.hbase.pb.VersionInfo*r\n\013Compare" +
|
||||
"Type\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001\022\t\n\005EQU" +
|
||||
"AL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR_EQUAL\020" +
|
||||
"\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010TimeUnit\022\017\n" +
|
||||
"\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022\020\n\014MILL" +
|
||||
"ISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTES\020\005\022\t\n\005" +
|
||||
"HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.hadoop.h" +
|
||||
"base.protobuf.generatedB\013HBaseProtosH\001\240\001" +
|
||||
"\001"
|
||||
"n\030\002 \003(\0132\030.hbase.pb.NameStringPair\"\235\001\n\013Ve" +
|
||||
"rsionInfo\022\017\n\007version\030\001 \002(\t\022\013\n\003url\030\002 \002(\t\022" +
|
||||
"\020\n\010revision\030\003 \002(\t\022\014\n\004user\030\004 \002(\t\022\014\n\004date\030" +
|
||||
"\005 \002(\t\022\024\n\014src_checksum\030\006 \002(\t\022\025\n\rversion_m" +
|
||||
"ajor\030\007 \001(\r\022\025\n\rversion_minor\030\010 \001(\r\"Q\n\020Reg",
|
||||
"ionServerInfo\022\020\n\010infoPort\030\001 \001(\005\022+\n\014versi" +
|
||||
"on_info\030\002 \001(\0132\025.hbase.pb.VersionInfo*r\n\013" +
|
||||
"CompareType\022\010\n\004LESS\020\000\022\021\n\rLESS_OR_EQUAL\020\001" +
|
||||
"\022\t\n\005EQUAL\020\002\022\r\n\tNOT_EQUAL\020\003\022\024\n\020GREATER_OR" +
|
||||
"_EQUAL\020\004\022\013\n\007GREATER\020\005\022\t\n\005NO_OP\020\006*n\n\010Time" +
|
||||
"Unit\022\017\n\013NANOSECONDS\020\001\022\020\n\014MICROSECONDS\020\002\022" +
|
||||
"\020\n\014MILLISECONDS\020\003\022\013\n\007SECONDS\020\004\022\013\n\007MINUTE" +
|
||||
"S\020\005\022\t\n\005HOURS\020\006\022\010\n\004DAYS\020\007B>\n*org.apache.h" +
|
||||
"adoop.hbase.protobuf.generatedB\013HBasePro" +
|
||||
"tosH\001\240\001\001"
|
||||
};
|
||||
com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner =
|
||||
new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() {
|
||||
|
@ -19084,7 +19265,7 @@ public final class HBaseProtos {
|
|||
internal_static_hbase_pb_VersionInfo_fieldAccessorTable = new
|
||||
com.google.protobuf.GeneratedMessage.FieldAccessorTable(
|
||||
internal_static_hbase_pb_VersionInfo_descriptor,
|
||||
new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", });
|
||||
new java.lang.String[] { "Version", "Url", "Revision", "User", "Date", "SrcChecksum", "VersionMajor", "VersionMinor", });
|
||||
internal_static_hbase_pb_RegionServerInfo_descriptor =
|
||||
getDescriptor().getMessageTypes().get(23);
|
||||
internal_static_hbase_pb_RegionServerInfo_fieldAccessorTable = new
|
||||
|
|
|
@ -211,6 +211,9 @@ message WALEntry {
 */
message ReplicateWALEntryRequest {
  repeated WALEntry entry = 1;
  optional string replicationClusterId = 2;
  optional string sourceBaseNamespaceDirPath = 3;
  optional string sourceHFileArchiveDirPath = 4;
}
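The three new optional fields let a replication sink locate bulk-loaded HFiles on the source cluster. A minimal sketch of filling them in; the generated class location (AdminProtos) and the literal values are assumptions, not taken from this diff:

    // import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;  // assumed location
    AdminProtos.ReplicateWALEntryRequest request =
        AdminProtos.ReplicateWALEntryRequest.newBuilder()
            .setReplicationClusterId("peer-cluster-1")                    // illustrative peer id
            .setSourceBaseNamespaceDirPath("/hbase/data/default")         // illustrative data dir
            .setSourceHFileArchiveDirPath("/hbase/archive/data/default")  // illustrative archive dir
            .build();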
|
||||
|
||||
message ReplicateWALEntryResponse {
|
||||
|
|
|
@ -282,6 +282,7 @@ message ScanRequest {
  optional bool client_handles_partials = 7;
  optional bool client_handles_heartbeats = 8;
  optional bool track_scan_metrics = 9;
  optional bool renew = 10 [default = false];
}
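The new renew flag lets a client renew a scanner lease without pulling back any rows. A minimal sketch of such a request using the generated ClientProtos builder (the scanner id is illustrative):

    // Ask the region server to renew the lease for scanner 42 without returning rows.
    ClientProtos.ScanRequest renewRequest = ClientProtos.ScanRequest.newBuilder()
        .setScannerId(42L)       // id previously handed out by the server (illustrative)
        .setNumberOfRows(0)      // no rows wanted, this call is only about the lease
        .setCloseScanner(false)
        .setRenew(true)
        .build();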
|
||||
|
||||
/**
|
||||
|
|
|
@ -227,6 +227,8 @@ message VersionInfo {
  required string user = 4;
  required string date = 5;
  required string src_checksum = 6;
  optional uint32 version_major = 7;
  optional uint32 version_minor = 8;
}
|
||||
|
||||
/**
|
||||
|
|
|
@ -51,6 +51,7 @@ for (Map.Entry<String, RegionState> e : rit.entrySet()) {
|
|||
}
|
||||
}
|
||||
|
||||
int totalRITs = rit.size();
|
||||
int toRemove = rit.size() - limit;
|
||||
int removed = 0;
|
||||
if (toRemove > 0) {
|
||||
|
@ -90,7 +91,7 @@ if (toRemove > 0) {
|
|||
</%for>
|
||||
<tr BGCOLOR="#D7DF01"> <td>Total number of Regions in Transition for more than <% ritThreshold %> milliseconds</td><td> <% numOfRITOverThreshold %></td><td></td>
|
||||
</tr>
|
||||
<tr> <td> Total number of Regions in Transition</td><td><% rit.size() %> </td><td></td>
|
||||
<tr> <td> Total number of Regions in Transition</td><td><% totalRITs %> </td><td></td>
|
||||
</table>
|
||||
<%if removed > 0 %>
|
||||
(<% removed %> more regions in transition not shown)
|
||||
|
|
|
@ -104,7 +104,8 @@ org.apache.hadoop.hbase.zookeeper.MasterAddressTracker;
|
|||
|
||||
<section>
|
||||
<h2>Server Metrics</h2>
|
||||
<& ServerMetricsTmpl; mWrap = regionServer.getRegionServerMetrics().getRegionServerWrapper(); &>
|
||||
<& ServerMetricsTmpl; mWrap = regionServer.getRegionServerMetrics().getRegionServerWrapper();
|
||||
mServerWrap = regionServer.getRpcServer().getMetrics().getHBaseServerWrapper(); &>
|
||||
</section>
|
||||
|
||||
<section>
|
||||
|
|
|
@ -95,7 +95,9 @@
|
|||
|
||||
<%for HRegionInfo r: onlineRegions %>
|
||||
<tr>
|
||||
<td><% r.getRegionNameAsString() %></td>
|
||||
<td><a href="region.jsp?name=<% r.getEncodedName() %>">
|
||||
<% r.getRegionNameAsString() %></a>
|
||||
</td>
|
||||
<td><% Bytes.toStringBinary(r.getStartKey()) %></td>
|
||||
<td><% Bytes.toStringBinary(r.getEndKey()) %></td>
|
||||
<td><% r.getReplicaId() %></td>
|
||||
|
@ -121,7 +123,9 @@
|
|||
<%java>
|
||||
RegionLoad load = regionServer.createRegionLoad(r.getEncodedName());
|
||||
</%java>
|
||||
<td><% r.getRegionNameAsString() %></td>
|
||||
<td><a href="region.jsp?name=<% r.getEncodedName() %>">
|
||||
<% r.getRegionNameAsString() %></a>
|
||||
</td>
|
||||
<%if load != null %>
|
||||
<td><% load.getReadRequestsCount() %></td>
|
||||
<td><% load.getWriteRequestsCount() %></td>
|
||||
|
@ -154,7 +158,9 @@
|
|||
<%java>
|
||||
RegionLoad load = regionServer.createRegionLoad(r.getEncodedName());
|
||||
</%java>
|
||||
<td><% r.getRegionNameAsString() %></td>
|
||||
<td><a href="region.jsp?name=<% r.getEncodedName() %>">
|
||||
<% r.getRegionNameAsString() %></a>
|
||||
</td>
|
||||
<%if load != null %>
|
||||
<td><% load.getStores() %></td>
|
||||
<td><% load.getStorefiles() %></td>
|
||||
|
@ -193,7 +199,9 @@
|
|||
((float) load.getCurrentCompactedKVs() / load.getTotalCompactingKVs())) + "%";
|
||||
}
|
||||
</%java>
|
||||
<td><% r.getRegionNameAsString() %></td>
|
||||
<td><a href="region.jsp?name=<% r.getEncodedName() %>">
|
||||
<% r.getRegionNameAsString() %></a>
|
||||
</td>
|
||||
<%if load != null %>
|
||||
<td><% load.getTotalCompactingKVs() %></td>
|
||||
<td><% load.getCurrentCompactedKVs() %></td>
|
||||
|
@ -220,7 +228,9 @@
|
|||
<%java>
|
||||
RegionLoad load = regionServer.createRegionLoad(r.getEncodedName());
|
||||
</%java>
|
||||
<td><% r.getRegionNameAsString() %></td>
|
||||
<td><a href="region.jsp?name=<% r.getEncodedName() %>">
|
||||
<% r.getRegionNameAsString() %></a>
|
||||
</td>
|
||||
<%if load != null %>
|
||||
<td><% load.getMemstoreSizeMB() %>m</td>
|
||||
</%if>
|
||||
|
|
|
@ -18,10 +18,12 @@ limitations under the License.
|
|||
</%doc>
|
||||
<%args>
|
||||
MetricsRegionServerWrapper mWrap;
|
||||
MetricsHBaseServerWrapper mServerWrap;
|
||||
</%args>
|
||||
<%import>
|
||||
java.util.*;
|
||||
org.apache.hadoop.hbase.regionserver.HRegionServer;
|
||||
org.apache.hadoop.hbase.ipc.MetricsHBaseServerWrapper;
|
||||
org.apache.hadoop.hbase.regionserver.MetricsRegionServerWrapper;
|
||||
org.apache.hadoop.hbase.util.Bytes;
|
||||
org.apache.hadoop.hbase.HRegionInfo;
|
||||
|
@ -61,7 +63,7 @@ java.lang.management.ManagementFactory;
|
|||
<& storeStats; mWrap = mWrap &>
|
||||
</div>
|
||||
<div class="tab-pane" id="tab_queueStats">
|
||||
<& queueStats; mWrap = mWrap &>
|
||||
<& queueStats; mWrap = mWrap; mServerWrap = mServerWrap; &>
|
||||
</div>
|
||||
</div>
|
||||
</div>
|
||||
|
@ -184,16 +186,20 @@ MetricsRegionServerWrapper mWrap;
|
|||
<%def queueStats>
|
||||
<%args>
|
||||
MetricsRegionServerWrapper mWrap;
|
||||
MetricsHBaseServerWrapper mServerWrap;
|
||||
</%args>
|
||||
<table class="table table-striped">
|
||||
<tr>
|
||||
<th>Compaction Queue Size</th>
|
||||
<th>Flush Queue Size</th>
|
||||
<th>Call Queue Size (bytes)</th>
|
||||
|
||||
</tr>
|
||||
<tr>
|
||||
<td><% mWrap.getCompactionQueueSize() %></td>
|
||||
<td><% mWrap.getFlushQueueSize() %></td>
|
||||
<td><% StringUtils.TraditionalBinaryPrefix.long2String(mServerWrap.getTotalQueueSize(),
|
||||
"", 1) %></td>
|
||||
</tr>
|
||||
</table>
|
||||
</%def>
|
||||
|
|
|
@ -22,6 +22,8 @@ import java.io.IOException;
|
|||
import java.util.ArrayList;
|
||||
import java.util.List;
|
||||
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
|
@ -33,7 +35,6 @@ import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
|||
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
|
||||
import org.apache.hadoop.hbase.regionserver.HRegion;
|
||||
import org.apache.hadoop.hbase.regionserver.RegionScanner;
|
||||
import org.mortbay.log.Log;
|
||||
|
||||
/**
|
||||
* A client scanner for a region opened for read-only on the client side. Assumes region data
|
||||
|
@ -42,6 +43,8 @@ import org.mortbay.log.Log;
|
|||
@InterfaceAudience.Private
|
||||
public class ClientSideRegionScanner extends AbstractClientScanner {
|
||||
|
||||
private static final Log LOG = LogFactory.getLog(ClientSideRegionScanner.class);
|
||||
|
||||
private HRegion region;
|
||||
RegionScanner scanner;
|
||||
List<Cell> values;
|
||||
|
@ -96,7 +99,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
|
|||
this.scanner.close();
|
||||
this.scanner = null;
|
||||
} catch (IOException ex) {
|
||||
Log.warn("Exception while closing scanner", ex);
|
||||
LOG.warn("Exception while closing scanner", ex);
|
||||
}
|
||||
}
|
||||
if (this.region != null) {
|
||||
|
@ -105,7 +108,7 @@ public class ClientSideRegionScanner extends AbstractClientScanner {
|
|||
this.region.close(true);
|
||||
this.region = null;
|
||||
} catch (IOException ex) {
|
||||
Log.warn("Exception while closing region", ex);
|
||||
LOG.warn("Exception while closing region", ex);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -0,0 +1,71 @@
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements. See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership. The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.hadoop.hbase.client;

import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.ipc.RpcCallContext;
import org.apache.hadoop.hbase.ipc.RpcServer;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;


/**
 * Class to help with parsing the version info.
 */
@InterfaceAudience.Private
public final class VersionInfoUtil {

  private VersionInfoUtil() {
    /* UTIL CLASS ONLY */
  }

  public static boolean currentClientHasMinimumVersion(int major, int minor) {
    RpcCallContext call = RpcServer.getCurrentCall();
    HBaseProtos.VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null;
    return hasMinimumVersion(versionInfo, major, minor);
  }

  public static boolean hasMinimumVersion(HBaseProtos.VersionInfo versionInfo,
                                          int major,
                                          int minor) {
    if (versionInfo != null) {
      if (versionInfo.hasVersionMajor() && versionInfo.hasVersionMinor()) {
        int clientMajor = versionInfo.getVersionMajor();
        if (clientMajor != major) {
          return clientMajor > major;
        }
        int clientMinor = versionInfo.getVersionMinor();
        return clientMinor >= minor;
      }
      try {
        String[] components = versionInfo.getVersion().split("\\.");

        int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0;
        if (clientMajor != major) {
          return clientMajor > major;
        }

        int clientMinor = components.length > 1 ? Integer.parseInt(components[1]) : 0;
        return clientMinor >= minor;
      } catch (NumberFormatException e) {
        return false;
      }
    }
    return false;
  }
}
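A small, self-contained check of the two code paths above (explicit major/minor fields versus the version-string fallback); the class name, placeholder strings and version numbers are illustrative only:

    import org.apache.hadoop.hbase.client.VersionInfoUtil;
    import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;

    public class VersionCheckSketch {
      public static void main(String[] args) {
        // Newer client: major/minor are sent explicitly in the connection header.
        HBaseProtos.VersionInfo withFields = HBaseProtos.VersionInfo.newBuilder()
            .setVersion("1.3.0").setUrl("x").setRevision("x").setUser("x")
            .setDate("x").setSrcChecksum("x")
            .setVersionMajor(1).setVersionMinor(3)
            .build();
        // Older client: only the free-form version string is available,
        // so the util falls back to parsing "major.minor" out of it.
        HBaseProtos.VersionInfo stringOnly = HBaseProtos.VersionInfo.newBuilder()
            .setVersion("0.98.12").setUrl("x").setRevision("x").setUser("x")
            .setDate("x").setSrcChecksum("x")
            .build();

        System.out.println(VersionInfoUtil.hasMinimumVersion(withFields, 1, 2));  // true
        System.out.println(VersionInfoUtil.hasMinimumVersion(stringOnly, 1, 2));  // false
      }
    }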
|
|
@ -104,8 +104,11 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
   */
  private static Set<String> coprocessorNames =
      Collections.synchronizedSet(new HashSet<String>());

  public static Set<String> getLoadedCoprocessors() {
    return coprocessorNames;
    synchronized (coprocessorNames) {
      return new HashSet(coprocessorNames);
    }
  }
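Returning the live synchronized set forced every caller to lock around iteration; handing back a snapshot makes the obvious usage safe. A hedged illustration of the caller-side difference:

    // After the change this loop cannot hit ConcurrentModificationException,
    // because getLoadedCoprocessors() now returns a point-in-time copy rather
    // than the live set that coprocessor loads/unloads keep mutating.
    for (String name : CoprocessorHost.getLoadedCoprocessors()) {
      System.out.println("loaded coprocessor: " + name);
    }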
|
||||
|
||||
/**
|
||||
|
@ -349,6 +352,7 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
|
|||
*/
|
||||
static class EnvironmentPriorityComparator
|
||||
implements Comparator<CoprocessorEnvironment> {
|
||||
@Override
|
||||
public int compare(final CoprocessorEnvironment env1,
|
||||
final CoprocessorEnvironment env2) {
|
||||
if (env1.getPriority() < env2.getPriority()) {
|
||||
|
@ -437,14 +441,16 @@ public abstract class CoprocessorHost<E extends CoprocessorEnvironment> {
|
|||
LOG.warn("Not stopping coprocessor "+impl.getClass().getName()+
|
||||
" because not active (state="+state.toString()+")");
|
||||
}
|
||||
// clean up any table references
|
||||
for (HTableInterface table: openTables) {
|
||||
try {
|
||||
((HTableWrapper)table).internalClose();
|
||||
} catch (IOException e) {
|
||||
// nothing can be done here
|
||||
LOG.warn("Failed to close " +
|
||||
Bytes.toStringBinary(table.getTableName()), e);
|
||||
synchronized (openTables) {
|
||||
// clean up any table references
|
||||
for (HTableInterface table: openTables) {
|
||||
try {
|
||||
((HTableWrapper)table).internalClose();
|
||||
} catch (IOException e) {
|
||||
// nothing can be done here
|
||||
LOG.warn("Failed to close " +
|
||||
Bytes.toStringBinary(table.getTableName()), e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -121,6 +121,7 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
|
|||
return this;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void run() {
|
||||
TraceScope chunk = Trace.startSpan(this.getClass().getSimpleName(), parent);
|
||||
try {
|
||||
|
@ -223,6 +224,10 @@ public abstract class EventHandler implements Runnable, Comparable<Runnable> {
|
|||
* @param t Throwable object
|
||||
*/
|
||||
protected void handleException(Throwable t) {
|
||||
LOG.error("Caught throwable while processing event " + eventType, t);
|
||||
String msg = "Caught throwable while processing event " + eventType;
|
||||
LOG.error(msg, t);
|
||||
if (server != null) {
|
||||
server.abort(msg, t);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -260,13 +260,14 @@ public class CacheStats {
  }

  public double getHitRatioPastNPeriods() {
    double ratio = ((double)sum(hitCounts)/(double)sum(requestCounts));
    double ratio = ((double)getSumHitCountsPastNPeriods() /
        (double)getSumRequestCountsPastNPeriods());
    return Double.isNaN(ratio) ? 0 : ratio;
  }

  public double getHitCachingRatioPastNPeriods() {
    double ratio =
        ((double)sum(hitCachingCounts)/(double)sum(requestCachingCounts));
    double ratio = ((double)getSumHitCachingCountsPastNPeriods() /
        (double)getSumRequestCachingCountsPastNPeriods());
    return Double.isNaN(ratio) ? 0 : ratio;
  }
|
||||
|
||||
|
|
|
@ -201,22 +201,38 @@ public class CombinedBlockCache implements ResizableBlockCache, HeapSize {
|
|||
}
|
||||
|
||||
@Override
|
||||
public double getHitRatioPastNPeriods() {
|
||||
double ratio = ((double) (lruCacheStats.getSumHitCountsPastNPeriods() + bucketCacheStats
|
||||
.getSumHitCountsPastNPeriods()) / (double) (lruCacheStats
|
||||
.getSumRequestCountsPastNPeriods() + bucketCacheStats
|
||||
.getSumRequestCountsPastNPeriods()));
|
||||
return Double.isNaN(ratio) ? 0 : ratio;
|
||||
public void rollMetricsPeriod() {
|
||||
lruCacheStats.rollMetricsPeriod();
|
||||
bucketCacheStats.rollMetricsPeriod();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getFailedInserts() {
|
||||
return lruCacheStats.getFailedInserts() + bucketCacheStats.getFailedInserts();
|
||||
}
|
||||
|
||||
@Override
|
||||
public double getHitCachingRatioPastNPeriods() {
|
||||
double ratio = ((double) (lruCacheStats
|
||||
.getSumHitCachingCountsPastNPeriods() + bucketCacheStats
|
||||
.getSumHitCachingCountsPastNPeriods()) / (double) (lruCacheStats
|
||||
.getSumRequestCachingCountsPastNPeriods() + bucketCacheStats
|
||||
.getSumRequestCachingCountsPastNPeriods()));
|
||||
return Double.isNaN(ratio) ? 0 : ratio;
|
||||
public long getSumHitCountsPastNPeriods() {
|
||||
return lruCacheStats.getSumHitCountsPastNPeriods()
|
||||
+ bucketCacheStats.getSumHitCountsPastNPeriods();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSumRequestCountsPastNPeriods() {
|
||||
return lruCacheStats.getSumRequestCountsPastNPeriods()
|
||||
+ bucketCacheStats.getSumRequestCountsPastNPeriods();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSumHitCachingCountsPastNPeriods() {
|
||||
return lruCacheStats.getSumHitCachingCountsPastNPeriods()
|
||||
+ bucketCacheStats.getSumHitCachingCountsPastNPeriods();
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getSumRequestCachingCountsPastNPeriods() {
|
||||
return lruCacheStats.getSumRequestCachingCountsPastNPeriods()
|
||||
+ bucketCacheStats.getSumRequestCachingCountsPastNPeriods();
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -19,6 +19,7 @@
|
|||
|
||||
package org.apache.hadoop.hbase.ipc;
|
||||
|
||||
import org.apache.hadoop.hbase.MultiActionResultTooLarge;
|
||||
import org.apache.hadoop.hbase.NotServingRegionException;
|
||||
import org.apache.hadoop.hbase.RegionTooBusyException;
|
||||
import org.apache.hadoop.hbase.UnknownScannerException;
|
||||
|
@ -31,8 +32,10 @@ import org.apache.hadoop.hbase.exceptions.RegionMovedException;
|
|||
@InterfaceAudience.Private
|
||||
public class MetricsHBaseServer {
|
||||
private MetricsHBaseServerSource source;
|
||||
private MetricsHBaseServerWrapper serverWrapper;
|
||||
|
||||
public MetricsHBaseServer(String serverName, MetricsHBaseServerWrapper wrapper) {
|
||||
serverWrapper = wrapper;
|
||||
source = CompatibilitySingletonFactory.getInstance(MetricsHBaseServerSourceFactory.class)
|
||||
.create(serverName, wrapper);
|
||||
}
|
||||
|
@ -105,6 +108,8 @@ public class MetricsHBaseServer {
|
|||
source.notServingRegionException();
|
||||
} else if (throwable instanceof FailedSanityCheckException) {
|
||||
source.failedSanityException();
|
||||
} else if (throwable instanceof MultiActionResultTooLarge) {
|
||||
source.multiActionTooLargeException();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -112,4 +117,8 @@ public class MetricsHBaseServer {
|
|||
public MetricsHBaseServerSource getMetricsSource() {
|
||||
return source;
|
||||
}
|
||||
|
||||
public MetricsHBaseServerWrapper getHBaseServerWrapper() {
|
||||
return serverWrapper;
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,10 +19,11 @@ package org.apache.hadoop.hbase.ipc;
|
|||
|
||||
import java.net.InetAddress;
|
||||
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo;
|
||||
import org.apache.hadoop.hbase.security.User;
|
||||
|
||||
|
||||
@InterfaceAudience.Private
|
||||
public interface RpcCallContext extends Delayable {
|
||||
/**
|
||||
* Check if the caller who made this IPC call has disconnected.
|
||||
|
@ -40,7 +41,7 @@ public interface RpcCallContext extends Delayable {
|
|||
* support cellblocks while fielding requests from clients that do not.
|
||||
* @return True if the client supports cellblocks, else return all content in pb
|
||||
*/
|
||||
boolean isClientCellBlockSupport();
|
||||
boolean isClientCellBlockSupported();
|
||||
|
||||
/**
|
||||
* Returns the user credentials associated with the current RPC request or
|
||||
|
@ -63,4 +64,25 @@ public interface RpcCallContext extends Delayable {
|
|||
* @return the client version info, or null if the information is not present
|
||||
*/
|
||||
VersionInfo getClientVersionInfo();
|
||||
|
||||
boolean isRetryImmediatelySupported();
|
||||
|
||||
/**
|
||||
* The size of response cells that have been accumulated so far.
|
||||
* This along with the corresponding increment call is used to ensure that multi's or
|
||||
* scans dont get too excessively large
|
||||
*/
|
||||
long getResponseCellSize();
|
||||
|
||||
/**
|
||||
* Add on the given amount to the retained cell size.
|
||||
*
|
||||
* This is not thread safe and not synchronized at all. If this is used by more than one thread
|
||||
* then everything will break. Since this is called for every row synchronization would be too
|
||||
* onerous.
|
||||
*/
|
||||
void incrementResponseCellSize(long cellSize);
|
||||
|
||||
long getResponseBlockSize();
|
||||
void incrementResponseBlockSize(long blockSize);
|
||||
}
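A hedged sketch of how a server-side handler could use these accounting hooks to cap a response; the quota parameter, row source and per-cell size estimate are illustrative, not HBase's actual scanner code:

    import java.util.List;
    import org.apache.hadoop.hbase.Cell;
    import org.apache.hadoop.hbase.client.Result;
    import org.apache.hadoop.hbase.ipc.RpcCallContext;

    public class ResponseSizeSketch {
      // Append rows to 'out' until the accumulated cell size crosses maxCellBytes.
      // Returns true if every row fit, false if the response was cut short.
      static boolean accumulate(RpcCallContext context, List<Result> rows,
          List<Result> out, long maxCellBytes) {
        for (Result row : rows) {
          long rowSize = 0;
          for (Cell cell : row.rawCells()) {
            // Rough per-cell estimate: key components plus value length.
            rowSize += cell.getRowLength() + cell.getFamilyLength()
                + cell.getQualifierLength() + cell.getValueLength();
          }
          context.incrementResponseCellSize(rowSize);
          out.add(row);
          if (context.getResponseCellSize() > maxCellBytes) {
            return false;   // stop here; the client will ask for the rest later
          }
        }
        return true;
      }
    }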
|
||||
|
|
|
@ -80,6 +80,7 @@ import org.apache.hadoop.hbase.Server;
|
|||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.client.NeedUnmanagedConnectionException;
|
||||
import org.apache.hadoop.hbase.client.Operation;
|
||||
import org.apache.hadoop.hbase.client.VersionInfoUtil;
|
||||
import org.apache.hadoop.hbase.codec.Codec;
|
||||
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
|
||||
import org.apache.hadoop.hbase.exceptions.RegionMovedException;
|
||||
|
@ -316,6 +317,10 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
private User user;
|
||||
private InetAddress remoteAddress;
|
||||
|
||||
private long responseCellSize = 0;
|
||||
private long responseBlockSize = 0;
|
||||
private boolean retryImmediatelySupported;
|
||||
|
||||
Call(int id, final BlockingService service, final MethodDescriptor md, RequestHeader header,
|
||||
Message param, CellScanner cellScanner, Connection connection, Responder responder,
|
||||
long size, TraceInfo tinfo, final InetAddress remoteAddress) {
|
||||
|
@ -335,6 +340,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
this.tinfo = tinfo;
|
||||
this.user = connection.user;
|
||||
this.remoteAddress = remoteAddress;
|
||||
this.retryImmediatelySupported = connection.retryImmediatelySupported;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -511,7 +517,7 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
}
|
||||
|
||||
@Override
|
||||
public boolean isClientCellBlockSupport() {
|
||||
public boolean isClientCellBlockSupported() {
|
||||
return this.connection != null && this.connection.codec != null;
|
||||
}
|
||||
|
||||
|
@ -528,6 +534,24 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
return this.size;
|
||||
}
|
||||
|
||||
public long getResponseCellSize() {
|
||||
return responseCellSize;
|
||||
}
|
||||
|
||||
public void incrementResponseCellSize(long cellSize) {
|
||||
responseCellSize += cellSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
public long getResponseBlockSize() {
|
||||
return responseBlockSize;
|
||||
}
|
||||
|
||||
@Override
|
||||
public void incrementResponseBlockSize(long blockSize) {
|
||||
responseBlockSize += blockSize;
|
||||
}
|
||||
|
||||
/**
|
||||
* If we have a response, and delay is not set, then respond
|
||||
* immediately. Otherwise, do not respond to client. This is
|
||||
|
@ -563,6 +587,12 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
public VersionInfo getClientVersionInfo() {
|
||||
return connection.getVersionInfo();
|
||||
}
|
||||
|
||||
|
||||
@Override
|
||||
public boolean isRetryImmediatelySupported() {
|
||||
return retryImmediatelySupported;
|
||||
}
|
||||
}
|
||||
|
||||
/** Listens on the socket. Creates jobs for the handler threads*/
|
||||
|
@ -1248,6 +1278,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
// was authentication allowed with a fallback to simple auth
|
||||
private boolean authenticatedWithFallback;
|
||||
|
||||
private boolean retryImmediatelySupported = false;
|
||||
|
||||
public UserGroupInformation attemptingUser = null; // user name before auth
|
||||
protected User user = null;
|
||||
protected UserGroupInformation ugi = null;
|
||||
|
@ -1704,6 +1736,9 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
}
|
||||
}
|
||||
if (connectionHeader.hasVersionInfo()) {
|
||||
// see if this connection will support RetryImmediatelyException
|
||||
retryImmediatelySupported = VersionInfoUtil.hasMinimumVersion(getVersionInfo(), 1, 2);
|
||||
|
||||
AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
|
||||
+ " with version info: "
|
||||
+ TextFormat.shortDebugString(connectionHeader.getVersionInfo()));
|
||||
|
@ -1711,6 +1746,8 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
|
|||
AUDITLOG.info("Connection from " + this.hostAddress + " port: " + this.remotePort
|
||||
+ " with unknown version info");
|
||||
}
|
||||
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1988,15 +2025,20 @@ public class RpcServer implements RpcServerInterface, ConfigurationObserver {
      final InetSocketAddress bindAddress, Configuration conf,
      RpcScheduler scheduler)
      throws IOException {
    this.reservoir = new BoundedByteBufferPool(
        conf.getInt("hbase.ipc.server.reservoir.max.buffer.size", 1024 * 1024),
        conf.getInt("hbase.ipc.server.reservoir.initial.buffer.size", 16 * 1024),
        // Make the max twice the number of handlers to be safe.
        conf.getInt("hbase.ipc.server.reservoir.initial.max",
            conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
                HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * 2),
        // By default make direct byte buffers from the buffer pool.
        conf.getBoolean("hbase.ipc.server.reservoir.direct.buffer", true));

    if (conf.getBoolean("hbase.ipc.server.reservoir.enabled", true)) {
      this.reservoir = new BoundedByteBufferPool(
          conf.getInt("hbase.ipc.server.reservoir.max.buffer.size", 1024 * 1024),
          conf.getInt("hbase.ipc.server.reservoir.initial.buffer.size", 16 * 1024),
          // Make the max twice the number of handlers to be safe.
          conf.getInt("hbase.ipc.server.reservoir.initial.max",
              conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
                  HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * 2),
          // By default make direct byte buffers from the buffer pool.
          conf.getBoolean("hbase.ipc.server.reservoir.direct.buffer", true));
    } else {
      reservoir = null;
    }
    this.server = server;
    this.services = services;
    this.bindAddress = bindAddress;
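The buffer reservoir is now optional. A minimal sketch of turning it off programmatically; the property name and its default of true come from the code above:

    org.apache.hadoop.conf.Configuration conf =
        org.apache.hadoop.hbase.HBaseConfiguration.create();
    // The reservoir is on by default; set this to false to fall back to plain allocation.
    conf.setBoolean("hbase.ipc.server.reservoir.enabled", false);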
|
||||
|
|
|
@ -20,66 +20,6 @@ package org.apache.hadoop.hbase.mapreduce;
|
|||
|
||||
import static java.lang.String.format;
|
||||
|
||||
import com.google.common.collect.HashMultimap;
|
||||
import com.google.common.collect.Multimap;
|
||||
import com.google.common.collect.Multimaps;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
|
||||
import org.apache.commons.lang.mutable.MutableInt;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.KeyValue;
|
||||
import org.apache.hadoop.hbase.KeyValueUtil;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
import org.apache.hadoop.hbase.client.ClusterConnection;
|
||||
import org.apache.hadoop.hbase.client.Connection;
|
||||
import org.apache.hadoop.hbase.client.ConnectionFactory;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HConnection;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.NeedUnmanagedConnectionException;
|
||||
import org.apache.hadoop.hbase.client.RegionLocator;
|
||||
import org.apache.hadoop.hbase.client.RegionServerCallable;
|
||||
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
|
||||
import org.apache.hadoop.hbase.client.Table;
|
||||
import org.apache.hadoop.hbase.client.coprocessor.SecureBulkLoadClient;
|
||||
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
|
||||
import org.apache.hadoop.hbase.io.HFileLink;
|
||||
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
|
||||
import org.apache.hadoop.hbase.io.Reference;
|
||||
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFile;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileContext;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
|
||||
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
|
||||
import org.apache.hadoop.hbase.regionserver.BloomType;
|
||||
import org.apache.hadoop.hbase.regionserver.HStore;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
|
||||
import org.apache.hadoop.hbase.security.UserProvider;
|
||||
import org.apache.hadoop.hbase.security.token.FsDelegationToken;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.FSHDFSUtils;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.apache.hadoop.util.Tool;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
|
||||
import java.io.FileNotFoundException;
|
||||
import java.io.IOException;
|
||||
import java.io.InterruptedIOException;
|
||||
|
@ -106,6 +46,64 @@ import java.util.concurrent.LinkedBlockingQueue;
|
|||
import java.util.concurrent.ThreadPoolExecutor;
|
||||
import java.util.concurrent.TimeUnit;
|
||||
|
||||
import org.apache.commons.lang.mutable.MutableInt;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
import org.apache.hadoop.conf.Configured;
|
||||
import org.apache.hadoop.fs.FileStatus;
|
||||
import org.apache.hadoop.fs.FileSystem;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.fs.permission.FsPermission;
|
||||
import org.apache.hadoop.hbase.HBaseConfiguration;
|
||||
import org.apache.hadoop.hbase.HColumnDescriptor;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.TableNotFoundException;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.client.Admin;
|
||||
import org.apache.hadoop.hbase.client.ClusterConnection;
|
||||
import org.apache.hadoop.hbase.client.Connection;
|
||||
import org.apache.hadoop.hbase.client.ConnectionFactory;
|
||||
import org.apache.hadoop.hbase.client.HBaseAdmin;
|
||||
import org.apache.hadoop.hbase.client.HConnection;
|
||||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.RegionLocator;
|
||||
import org.apache.hadoop.hbase.client.RegionServerCallable;
|
||||
import org.apache.hadoop.hbase.client.RpcRetryingCallerFactory;
|
||||
import org.apache.hadoop.hbase.client.Table;
|
||||
import org.apache.hadoop.hbase.client.coprocessor.SecureBulkLoadClient;
|
||||
import org.apache.hadoop.hbase.coprocessor.CoprocessorHost;
|
||||
import org.apache.hadoop.hbase.io.HFileLink;
|
||||
import org.apache.hadoop.hbase.io.HalfStoreFileReader;
|
||||
import org.apache.hadoop.hbase.io.Reference;
|
||||
import org.apache.hadoop.hbase.io.compress.Compression.Algorithm;
|
||||
import org.apache.hadoop.hbase.io.hfile.CacheConfig;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFile;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileContext;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileContextBuilder;
|
||||
import org.apache.hadoop.hbase.io.hfile.HFileScanner;
|
||||
import org.apache.hadoop.hbase.protobuf.ProtobufUtil;
|
||||
import org.apache.hadoop.hbase.regionserver.BloomType;
|
||||
import org.apache.hadoop.hbase.regionserver.HStore;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFile;
|
||||
import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
|
||||
import org.apache.hadoop.hbase.security.UserProvider;
|
||||
import org.apache.hadoop.hbase.security.access.SecureBulkLoadEndpoint;
|
||||
import org.apache.hadoop.hbase.security.token.FsDelegationToken;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.FSHDFSUtils;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.apache.hadoop.util.Tool;
|
||||
import org.apache.hadoop.util.ToolRunner;
|
||||
|
||||
import com.google.common.collect.HashMultimap;
|
||||
import com.google.common.collect.Multimap;
|
||||
import com.google.common.collect.Multimaps;
|
||||
import com.google.common.util.concurrent.ThreadFactoryBuilder;
|
||||
|
||||
/**
|
||||
* Tool to load the output of HFileOutputFormat into an existing table.
|
||||
* @see #usage()
|
||||
|
@ -131,6 +129,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
private FsDelegationToken fsDelegationToken;
|
||||
private String bulkToken;
|
||||
private UserProvider userProvider;
|
||||
private int nrThreads;
|
||||
|
||||
private LoadIncrementalHFiles() {}
|
||||
|
||||
|
@ -151,6 +150,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
|
||||
assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
|
||||
maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
|
||||
nrThreads = conf.getInt("hbase.loadincremental.threads.max",
|
||||
Runtime.getRuntime().availableProcessors());
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -251,7 +252,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
* region boundary, and each part is added back into the queue.
|
||||
* The import process finishes when the queue is empty.
|
||||
*/
|
||||
static class LoadQueueItem {
|
||||
public static class LoadQueueItem {
|
||||
final byte[] family;
|
||||
final Path hfilePath;
|
||||
|
||||
|
@ -343,24 +344,14 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
* @param table the table to load into
|
||||
* @throws TableNotFoundException if table does not yet exist
|
||||
*/
|
||||
@SuppressWarnings("deprecation")
|
||||
public void doBulkLoad(Path hfofDir, final Admin admin, Table table,
|
||||
RegionLocator regionLocator) throws TableNotFoundException, IOException {
|
||||
|
||||
if (!admin.isTableAvailable(regionLocator.getName())) {
|
||||
throw new TableNotFoundException("Table " + table.getName() + "is not currently available.");
|
||||
throw new TableNotFoundException("Table " + table.getName() + " is not currently available.");
|
||||
}
|
||||
|
||||
// initialize thread pools
|
||||
int nrThreads = getConf().getInt("hbase.loadincremental.threads.max",
|
||||
Runtime.getRuntime().availableProcessors());
|
||||
ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
|
||||
builder.setNameFormat("LoadIncrementalHFiles-%1$d");
|
||||
ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads,
|
||||
60, TimeUnit.SECONDS,
|
||||
new LinkedBlockingQueue<Runnable>(),
|
||||
builder.build());
|
||||
((ThreadPoolExecutor)pool).allowCoreThreadTimeOut(true);
|
||||
ExecutorService pool = createExecutorService();
|
||||
|
||||
// LQI queue does not need to be threadsafe -- all operations on this queue
|
||||
// happen in this thread
|
||||
|
@ -377,30 +368,8 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
"option, consider removing the files and bulkload again without this option. " +
|
||||
"See HBASE-13985");
|
||||
}
|
||||
discoverLoadQueue(queue, hfofDir, validateHFile);
|
||||
// check whether there is invalid family name in HFiles to be bulkloaded
|
||||
Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
|
||||
ArrayList<String> familyNames = new ArrayList<String>(families.size());
|
||||
for (HColumnDescriptor family : families) {
|
||||
familyNames.add(family.getNameAsString());
|
||||
}
|
||||
ArrayList<String> unmatchedFamilies = new ArrayList<String>();
|
||||
Iterator<LoadQueueItem> queueIter = queue.iterator();
|
||||
while (queueIter.hasNext()) {
|
||||
LoadQueueItem lqi = queueIter.next();
|
||||
String familyNameInHFile = Bytes.toString(lqi.family);
|
||||
if (!familyNames.contains(familyNameInHFile)) {
|
||||
unmatchedFamilies.add(familyNameInHFile);
|
||||
}
|
||||
}
|
||||
if (unmatchedFamilies.size() > 0) {
|
||||
String msg =
|
||||
"Unmatched family names found: unmatched family names in HFiles to be bulkloaded: "
|
||||
+ unmatchedFamilies + "; valid family names of table "
|
||||
+ table.getName() + " are: " + familyNames;
|
||||
LOG.error(msg);
|
||||
throw new IOException(msg);
|
||||
}
|
||||
prepareHFileQueue(hfofDir, table, queue, validateHFile);
|
||||
|
||||
int count = 0;
|
||||
|
||||
if (queue.isEmpty()) {
|
||||
|
@ -427,7 +396,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
+ count + " with " + queue.size() + " files remaining to group or split");
|
||||
}
|
||||
|
||||
int maxRetries = getConf().getInt("hbase.bulkload.retries.number", 10);
|
||||
int maxRetries = getConf().getInt(HConstants.BULKLOAD_MAX_RETRIES_NUMBER, 10);
|
||||
maxRetries = Math.max(maxRetries, startEndKeys.getFirst().length + 1);
|
||||
if (maxRetries != 0 && count >= maxRetries) {
|
||||
throw new IOException("Retry attempted " + count +
|
||||
|
@ -476,6 +445,86 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Prepare a collection of {@link LoadQueueItem} from list of source hfiles contained in the
|
||||
* passed directory and validates whether the prepared queue has all the valid table column
|
||||
* families in it.
|
||||
* @param hfilesDir directory containing list of hfiles to be loaded into the table
|
||||
* @param table table to which hfiles should be loaded
|
||||
* @param queue queue which needs to be loaded into the table
|
||||
* @param validateHFile if true hfiles will be validated for its format
|
||||
* @throws IOException If any I/O or network error occurred
|
||||
*/
|
||||
public void prepareHFileQueue(Path hfilesDir, Table table, Deque<LoadQueueItem> queue,
|
||||
boolean validateHFile) throws IOException {
|
||||
discoverLoadQueue(queue, hfilesDir, validateHFile);
|
||||
validateFamiliesInHFiles(table, queue);
|
||||
}
|
||||
|
||||
// Initialize a thread pool
|
||||
private ExecutorService createExecutorService() {
|
||||
ThreadFactoryBuilder builder = new ThreadFactoryBuilder();
|
||||
builder.setNameFormat("LoadIncrementalHFiles-%1$d");
|
||||
ExecutorService pool = new ThreadPoolExecutor(nrThreads, nrThreads, 60, TimeUnit.SECONDS,
|
||||
new LinkedBlockingQueue<Runnable>(), builder.build());
|
||||
((ThreadPoolExecutor) pool).allowCoreThreadTimeOut(true);
|
||||
return pool;
|
||||
}
|
||||
|
||||
/**
|
||||
* Checks whether there is any invalid family name in HFiles to be bulk loaded.
|
||||
*/
|
||||
private void validateFamiliesInHFiles(Table table, Deque<LoadQueueItem> queue)
|
||||
throws IOException {
|
||||
Collection<HColumnDescriptor> families = table.getTableDescriptor().getFamilies();
|
||||
List<String> familyNames = new ArrayList<String>(families.size());
|
||||
for (HColumnDescriptor family : families) {
|
||||
familyNames.add(family.getNameAsString());
|
||||
}
|
||||
List<String> unmatchedFamilies = new ArrayList<String>();
|
||||
Iterator<LoadQueueItem> queueIter = queue.iterator();
|
||||
while (queueIter.hasNext()) {
|
||||
LoadQueueItem lqi = queueIter.next();
|
||||
String familyNameInHFile = Bytes.toString(lqi.family);
|
||||
if (!familyNames.contains(familyNameInHFile)) {
|
||||
unmatchedFamilies.add(familyNameInHFile);
|
||||
}
|
||||
}
|
||||
if (unmatchedFamilies.size() > 0) {
|
||||
String msg =
|
||||
"Unmatched family names found: unmatched family names in HFiles to be bulkloaded: "
|
||||
+ unmatchedFamilies + "; valid family names of table " + table.getName() + " are: "
|
||||
+ familyNames;
|
||||
LOG.error(msg);
|
||||
throw new IOException(msg);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Used by the replication sink to load the hfiles from the source cluster. It does the following,
|
||||
* 1. {@link LoadIncrementalHFiles#groupOrSplitPhase(Table, ExecutorService, Deque, Pair)} 2.
|
||||
* {@link
|
||||
* LoadIncrementalHFiles#bulkLoadPhase(Table, Connection, ExecutorService, Deque, Multimap)}
|
||||
* @param table Table to which these hfiles should be loaded to
|
||||
* @param conn Connection to use
|
||||
* @param queue {@link LoadQueueItem} has hfiles yet to be loaded
|
||||
* @param startEndKeys starting and ending row keys of the region
|
||||
*/
|
||||
public void loadHFileQueue(final Table table, final Connection conn, Deque<LoadQueueItem> queue,
|
||||
Pair<byte[][], byte[][]> startEndKeys) throws IOException {
|
||||
ExecutorService pool = null;
|
||||
try {
|
||||
pool = createExecutorService();
|
||||
Multimap<ByteBuffer, LoadQueueItem> regionGroups =
|
||||
groupOrSplitPhase(table, pool, queue, startEndKeys);
|
||||
bulkLoadPhase(table, conn, pool, queue, regionGroups);
|
||||
} finally {
|
||||
if (pool != null) {
|
||||
pool.shutdown();
|
||||
}
|
||||
}
|
||||
}
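A hedged end-to-end sketch of the new public entry points as a replication sink might call them; the table name, staging path and error handling are illustrative, not code from this patch:

    import java.util.Deque;
    import java.util.LinkedList;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.RegionLocator;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles;
    import org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles.LoadQueueItem;

    public class HFileQueueLoadSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("replicated_table"));
             RegionLocator locator = conn.getRegionLocator(table.getName())) {
          LoadIncrementalHFiles loader = new LoadIncrementalHFiles(conf);
          Deque<LoadQueueItem> queue = new LinkedList<LoadQueueItem>();
          // Discover HFiles under the staging directory and verify their families exist.
          loader.prepareHFileQueue(new Path("/staging/replicated_table"), table, queue, true);
          if (!queue.isEmpty()) {
            // Group by region, split across boundaries if needed, then bulk load.
            loader.loadHFileQueue(table, conn, queue, locator.getStartEndKeys());
          }
        }
      }
    }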
|
||||
|
||||
/**
|
||||
* This takes the LQI's grouped by likely regions and attempts to bulk load
|
||||
* them. Any failures are re-queued for another pass with the
|
||||
|
@ -623,10 +672,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
|
||||
String uniqueName = getUniqueName();
|
||||
HColumnDescriptor familyDesc = table.getTableDescriptor().getFamily(item.family);
|
||||
|
||||
Path botOut = new Path(tmpDir, uniqueName + ".bottom");
|
||||
Path topOut = new Path(tmpDir, uniqueName + ".top");
|
||||
splitStoreFile(getConf(), hfilePath, familyDesc, splitKey,
|
||||
botOut, topOut);
|
||||
splitStoreFile(getConf(), hfilePath, familyDesc, splitKey, botOut, topOut);
|
||||
|
||||
FileSystem fs = tmpDir.getFileSystem(getConf());
|
||||
fs.setPermission(tmpDir, FsPermission.valueOf("-rwxrwxrwx"));
|
||||
|
@ -657,6 +706,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
final Pair<byte[][], byte[][]> startEndKeys)
|
||||
throws IOException {
|
||||
final Path hfilePath = item.hfilePath;
|
||||
// fs is the source filesystem
|
||||
if (fs == null) {
|
||||
fs = hfilePath.getFileSystem(getConf());
|
||||
}
|
||||
HFile.Reader hfr = HFile.createReader(fs, hfilePath,
|
||||
new CacheConfig(getConf()), getConf());
|
||||
final byte[] first, last;
|
||||
|
@ -756,7 +809,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
* failure
|
||||
*/
|
||||
protected List<LoadQueueItem> tryAtomicRegionLoad(final Connection conn,
|
||||
final TableName tableName, final byte[] first, Collection<LoadQueueItem> lqis)
|
||||
final TableName tableName, final byte[] first, final Collection<LoadQueueItem> lqis)
|
||||
throws IOException {
|
||||
final List<Pair<byte[], String>> famPaths =
|
||||
new ArrayList<Pair<byte[], String>>(lqis.size());
|
||||
|
@ -791,6 +844,10 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
//in user directory
|
||||
if(secureClient != null && !success) {
|
||||
FileSystem targetFs = FileSystem.get(getConf());
|
||||
// fs is the source filesystem
|
||||
if(fs == null) {
|
||||
fs = lqis.iterator().next().hfilePath.getFileSystem(getConf());
|
||||
}
|
||||
// Check to see if the source and target filesystems are the same
|
||||
// If they are the same filesystem, we will try move the files back
|
||||
// because previously we moved them to the staging directory.
|
||||
|
@ -887,6 +944,7 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
.withBytesPerCheckSum(HStore.getBytesPerChecksum(conf))
|
||||
.withBlockSize(blocksize)
|
||||
.withDataBlockEncoding(familyDescriptor.getDataBlockEncoding())
|
||||
.withIncludesTags(true)
|
||||
.build();
|
||||
halfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
|
||||
fs)
|
||||
|
@ -1044,4 +1102,17 @@ public class LoadIncrementalHFiles extends Configured implements Tool {
|
|||
System.exit(ret);
|
||||
}
|
||||
|
||||
/**
* Called from the replication sink, where it manages the bulkToken (staging directory) by itself.
* This is used only when {@link SecureBulkLoadEndpoint} is configured in the
* hbase.coprocessor.region.classes property. This directory is used as a temporary directory
* where all files are initially copied/moved from the user-given directory, all required file
* permissions are set, and from there the data is finally loaded into a table. This should be set
* only when one would like to manage the staging directory oneself. Otherwise this tool will
* handle it by itself.
* @param stagingDir staging directory path
*/
|
||||
public void setBulkToken(String stagingDir) {
|
||||
this.bulkToken = stagingDir;
|
||||
}
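Sketch of a caller that manages its own staging directory (the path below is illustrative only): set the bulk token before queueing the load, otherwise the tool creates and manages the staging directory itself.
loader.setBulkToken("/hbase/staging/replication-bulkload-tmp");  // illustrative path
loader.loadHFileQueue(table, conn, queue, locator.getStartEndKeys());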
|
||||
|
||||
}
@ -43,7 +43,6 @@ import org.apache.hadoop.hbase.client.Scan;
|
|||
import org.apache.hadoop.hbase.client.Table;
|
||||
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
|
||||
import org.apache.hadoop.mapreduce.Counters;
|
||||
import org.apache.hadoop.mapreduce.Job;
|
||||
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
|
||||
|
@ -174,8 +173,9 @@ public class SyncTable extends Configured implements Tool {
|
|||
|
||||
Configuration conf = context.getConfiguration();
|
||||
sourceHashDir = new Path(conf.get(SOURCE_HASH_DIR_CONF_KEY));
|
||||
sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY);
|
||||
targetConnection = openConnection(conf, TARGET_ZK_CLUSTER_CONF_KEY);
|
||||
sourceConnection = openConnection(conf, SOURCE_ZK_CLUSTER_CONF_KEY, null);
|
||||
targetConnection = openConnection(conf, TARGET_ZK_CLUSTER_CONF_KEY,
|
||||
TableOutputFormat.OUTPUT_CONF_PREFIX);
|
||||
sourceTable = openTable(sourceConnection, conf, SOURCE_TABLE_CONF_KEY);
|
||||
targetTable = openTable(targetConnection, conf, TARGET_TABLE_CONF_KEY);
|
||||
dryRun = conf.getBoolean(SOURCE_TABLE_CONF_KEY, false);
|
||||
|
@ -196,13 +196,12 @@ public class SyncTable extends Configured implements Tool {
|
|||
targetHasher = new HashTable.ResultHasher();
|
||||
}
|
||||
|
||||
private static Connection openConnection(Configuration conf, String zkClusterConfKey)
|
||||
private static Connection openConnection(Configuration conf, String zkClusterConfKey,
|
||||
String configPrefix)
|
||||
throws IOException {
|
||||
Configuration clusterConf = new Configuration(conf);
|
||||
String zkCluster = conf.get(zkClusterConfKey);
|
||||
if (zkCluster != null) {
|
||||
ZKUtil.applyClusterKeyToConf(clusterConf, zkCluster);
|
||||
}
|
||||
Configuration clusterConf = HBaseConfiguration.createClusterConf(conf,
|
||||
zkCluster, configPrefix);
|
||||
return ConnectionFactory.createConnection(clusterConf);
|
||||
}
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@ import java.util.*;
|
|||
import java.util.zip.ZipEntry;
|
||||
import java.util.zip.ZipFile;
|
||||
|
||||
import com.google.protobuf.InvalidProtocolBufferException;
|
||||
import org.apache.commons.logging.Log;
|
||||
import org.apache.commons.logging.LogFactory;
|
||||
import org.apache.hadoop.conf.Configuration;
|
||||
|
@ -49,12 +50,11 @@ import org.apache.hadoop.hbase.security.UserProvider;
|
|||
import org.apache.hadoop.hbase.security.token.TokenUtil;
|
||||
import org.apache.hadoop.hbase.util.Base64;
|
||||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZKConfig;
|
||||
import org.apache.hadoop.io.Writable;
|
||||
import org.apache.hadoop.mapreduce.InputFormat;
|
||||
import org.apache.hadoop.mapreduce.Job;
|
||||
import org.apache.hadoop.util.StringUtils;
|
||||
import com.google.protobuf.InvalidProtocolBufferException;
|
||||
|
||||
/**
|
||||
* Utility for {@link TableMapper} and {@link TableReducer}
|
||||
|
@ -475,12 +475,8 @@ public class TableMapReduceUtil {
|
|||
String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
|
||||
User user = userProvider.getCurrent();
|
||||
if (quorumAddress != null) {
|
||||
Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
|
||||
ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress);
|
||||
// apply any "hbase.mapred.output." configuration overrides
|
||||
Configuration outputOverrides =
|
||||
HBaseConfiguration.subset(peerConf, TableOutputFormat.OUTPUT_CONF_PREFIX);
|
||||
HBaseConfiguration.merge(peerConf, outputOverrides);
|
||||
Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
|
||||
quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
|
||||
Connection peerConn = ConnectionFactory.createConnection(peerConf);
|
||||
try {
|
||||
TokenUtil.addTokenForJob(peerConn, user, job);
|
||||
|
@ -513,15 +509,30 @@ public class TableMapReduceUtil {
|
|||
* @param job The job that requires the permission.
|
||||
* @param quorumAddress string that contains the 3 required configurations
|
||||
* @throws IOException When the authentication token cannot be obtained.
|
||||
* @deprecated Since 1.2.0, use {@link #initCredentialsForCluster(Job, Configuration)} instead.
|
||||
*/
|
||||
@Deprecated
|
||||
public static void initCredentialsForCluster(Job job, String quorumAddress)
|
||||
throws IOException {
|
||||
Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
|
||||
quorumAddress);
|
||||
initCredentialsForCluster(job, peerConf);
|
||||
}
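A hedged migration sketch for callers of the deprecated String overload: build the peer Configuration yourself (the "peerQuorumAddress" variable and the choice of prefix here are assumptions) and call the Configuration-based overload introduced below.
Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
    peerQuorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
TableMapReduceUtil.initCredentialsForCluster(job, peerConf);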
|
||||
|
||||
/**
|
||||
* Obtain an authentication token, for the specified cluster, on behalf of the current user
|
||||
* and add it to the credentials for the given map reduce job.
|
||||
*
|
||||
* @param job The job that requires the permission.
|
||||
* @param conf The configuration to use in connecting to the peer cluster
|
||||
* @throws IOException When the authentication token cannot be obtained.
|
||||
*/
|
||||
public static void initCredentialsForCluster(Job job, Configuration conf)
|
||||
throws IOException {
|
||||
UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
|
||||
if (userProvider.isHBaseSecurityEnabled()) {
|
||||
try {
|
||||
Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
|
||||
ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress);
|
||||
Connection peerConn = ConnectionFactory.createConnection(peerConf);
|
||||
Connection peerConn = ConnectionFactory.createConnection(conf);
|
||||
try {
|
||||
TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
|
||||
} finally {
|
||||
|
@ -670,7 +681,7 @@ public class TableMapReduceUtil {
|
|||
// If passed a quorum/ensemble address, pass it on to TableOutputFormat.
|
||||
if (quorumAddress != null) {
|
||||
// Calling this will validate the format
|
||||
ZKUtil.transformClusterKey(quorumAddress);
|
||||
ZKConfig.validateClusterKey(quorumAddress);
|
||||
conf.set(TableOutputFormat.QUORUM_ADDRESS,quorumAddress);
|
||||
}
|
||||
if (serverClass != null && serverImpl != null) {
@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.client.Delete;
|
|||
import org.apache.hadoop.hbase.client.HTable;
|
||||
import org.apache.hadoop.hbase.client.Mutation;
|
||||
import org.apache.hadoop.hbase.client.Put;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
|
||||
import org.apache.hadoop.mapreduce.JobContext;
|
||||
import org.apache.hadoop.mapreduce.OutputCommitter;
|
||||
import org.apache.hadoop.mapreduce.OutputFormat;
|
||||
|
@ -191,22 +190,19 @@ implements Configurable {
|
|||
|
||||
@Override
|
||||
public void setConf(Configuration otherConf) {
|
||||
this.conf = HBaseConfiguration.create(otherConf);
|
||||
|
||||
String tableName = this.conf.get(OUTPUT_TABLE);
|
||||
String tableName = otherConf.get(OUTPUT_TABLE);
|
||||
if(tableName == null || tableName.length() <= 0) {
|
||||
throw new IllegalArgumentException("Must specify table name");
|
||||
}
|
||||
|
||||
String address = this.conf.get(QUORUM_ADDRESS);
|
||||
int zkClientPort = this.conf.getInt(QUORUM_PORT, 0);
|
||||
String serverClass = this.conf.get(REGION_SERVER_CLASS);
|
||||
String serverImpl = this.conf.get(REGION_SERVER_IMPL);
|
||||
String address = otherConf.get(QUORUM_ADDRESS);
|
||||
int zkClientPort = otherConf.getInt(QUORUM_PORT, 0);
|
||||
String serverClass = otherConf.get(REGION_SERVER_CLASS);
|
||||
String serverImpl = otherConf.get(REGION_SERVER_IMPL);
|
||||
|
||||
try {
|
||||
if (address != null) {
|
||||
ZKUtil.applyClusterKeyToConf(this.conf, address);
|
||||
}
|
||||
this.conf = HBaseConfiguration.createClusterConf(otherConf, address, OUTPUT_CONF_PREFIX);
|
||||
|
||||
if (serverClass != null) {
|
||||
this.conf.set(HConstants.REGION_SERVER_IMPL, serverImpl);
|
||||
}
|
||||
|
@ -217,9 +213,5 @@ implements Configurable {
|
|||
LOG.error(e);
|
||||
throw new RuntimeException(e);
|
||||
}
|
||||
|
||||
// finally apply any remaining "hbase.mapred.output." configuration overrides
|
||||
Configuration outputOverrides = HBaseConfiguration.subset(otherConf, OUTPUT_CONF_PREFIX);
|
||||
HBaseConfiguration.merge(this.conf, outputOverrides);
|
||||
}
|
||||
}
|
||||
|
|
|
@ -47,7 +47,6 @@ import org.apache.hadoop.hbase.replication.ReplicationPeers;
|
|||
import org.apache.hadoop.hbase.util.Bytes;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZooKeeperWatcher;
|
||||
import org.apache.hadoop.hbase.zookeeper.ZKUtil;
|
||||
import org.apache.hadoop.mapreduce.Job;
|
||||
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;
|
||||
import org.apache.hadoop.util.Tool;
|
||||
|
@ -69,6 +68,7 @@ public class VerifyReplication extends Configured implements Tool {
|
|||
LogFactory.getLog(VerifyReplication.class);
|
||||
|
||||
public final static String NAME = "verifyrep";
|
||||
private final static String PEER_CONFIG_PREFIX = NAME + ".peer.";
|
||||
static long startTime = 0;
|
||||
static long endTime = Long.MAX_VALUE;
|
||||
static int versions = -1;
|
||||
|
@ -126,8 +126,8 @@ public class VerifyReplication extends Configured implements Tool {
|
|||
@Override
|
||||
public Void connect(HConnection conn) throws IOException {
|
||||
String zkClusterKey = conf.get(NAME + ".peerQuorumAddress");
|
||||
Configuration peerConf = HBaseConfiguration.create(conf);
|
||||
ZKUtil.applyClusterKeyToConf(peerConf, zkClusterKey);
|
||||
Configuration peerConf = HBaseConfiguration.createClusterConf(conf,
|
||||
zkClusterKey, PEER_CONFIG_PREFIX);
|
||||
|
||||
TableName tableName = TableName.valueOf(conf.get(NAME + ".tableName"));
|
||||
replicatedTable = new HTable(peerConf, tableName);
|
||||
|
@ -203,7 +203,8 @@ public class VerifyReplication extends Configured implements Tool {
|
|||
}
|
||||
}
|
||||
|
||||
private static String getPeerQuorumAddress(final Configuration conf) throws IOException {
|
||||
private static Pair<ReplicationPeerConfig, Configuration> getPeerQuorumConfig(
|
||||
final Configuration conf) throws IOException {
|
||||
ZooKeeperWatcher localZKW = null;
|
||||
ReplicationPeerZKImpl peer = null;
|
||||
try {
|
||||
|
@ -220,8 +221,8 @@ public class VerifyReplication extends Configured implements Tool {
|
|||
if (pair == null) {
|
||||
throw new IOException("Couldn't get peer conf!");
|
||||
}
|
||||
Configuration peerConf = rp.getPeerConf(peerId).getSecond();
|
||||
return ZKUtil.getZooKeeperClusterKey(peerConf);
|
||||
|
||||
return pair;
|
||||
} catch (ReplicationException e) {
|
||||
throw new IOException(
|
||||
"An error occured while trying to connect to the remove peer cluster", e);
|
||||
|
@ -260,9 +261,14 @@ public class VerifyReplication extends Configured implements Tool {
|
|||
conf.set(NAME+".families", families);
|
||||
}
|
||||
|
||||
String peerQuorumAddress = getPeerQuorumAddress(conf);
|
||||
Pair<ReplicationPeerConfig, Configuration> peerConfigPair = getPeerQuorumConfig(conf);
|
||||
ReplicationPeerConfig peerConfig = peerConfigPair.getFirst();
|
||||
String peerQuorumAddress = peerConfig.getClusterKey();
|
||||
LOG.info("Peer Quorum Address: " + peerQuorumAddress + ", Peer Configuration: " +
|
||||
peerConfig.getConfiguration());
|
||||
conf.set(NAME + ".peerQuorumAddress", peerQuorumAddress);
|
||||
LOG.info("Peer Quorum Address: " + peerQuorumAddress);
|
||||
HBaseConfiguration.setWithPrefix(conf, PEER_CONFIG_PREFIX,
|
||||
peerConfig.getConfiguration().entrySet());
|
||||
|
||||
conf.setInt(NAME + ".versions", versions);
|
||||
LOG.info("Number of version: " + versions);
|
||||
|
@ -285,8 +291,9 @@ public class VerifyReplication extends Configured implements Tool {
|
|||
TableMapReduceUtil.initTableMapperJob(tableName, scan,
|
||||
Verifier.class, null, null, job);
|
||||
|
||||
Configuration peerClusterConf = peerConfigPair.getSecond();
|
||||
// Obtain the auth token from peer cluster
|
||||
TableMapReduceUtil.initCredentialsForCluster(job, peerQuorumAddress);
|
||||
TableMapReduceUtil.initCredentialsForCluster(job, peerClusterConf);
|
||||
|
||||
job.setOutputFormatClass(NullOutputFormat.class);
|
||||
job.setNumReduceTasks(0);
|
||||
|
|
|
@ -3720,7 +3720,7 @@ public class AssignmentManager extends ZooKeeperListener {
|
|||
invokeAssign(hri, false);
|
||||
}
|
||||
|
||||
private String onRegionSplit(ServerName sn, TransitionCode code,
|
||||
private String checkInStateForSplit(ServerName sn,
|
||||
final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
|
||||
final RegionState rs_p = regionStates.getRegionState(p);
|
||||
RegionState rs_a = regionStates.getRegionState(a);
|
||||
|
@ -3730,6 +3730,32 @@ public class AssignmentManager extends ZooKeeperListener {
|
|||
&& (rs_b == null || rs_b.isOpenOrSplittingNewOnServer(sn)))) {
|
||||
return "Not in state good for split";
|
||||
}
|
||||
return "";
|
||||
}
|
||||
|
||||
private String onRegionSplitReverted(ServerName sn,
|
||||
final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
|
||||
String s = checkInStateForSplit(sn, p, a, b);
|
||||
if (!org.apache.commons.lang.StringUtils.isEmpty(s)) {
|
||||
return s;
|
||||
}
|
||||
regionOnline(p, sn);
|
||||
regionOffline(a);
|
||||
regionOffline(b);
|
||||
|
||||
if (getTableStateManager().isTableState(p.getTable(),
|
||||
ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
|
||||
invokeUnAssign(p);
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
private String onRegionSplit(ServerName sn, TransitionCode code,
|
||||
final HRegionInfo p, final HRegionInfo a, final HRegionInfo b) {
|
||||
String s = checkInStateForSplit(sn, p, a, b);
|
||||
if (!org.apache.commons.lang.StringUtils.isEmpty(s)) {
|
||||
return s;
|
||||
}
|
||||
|
||||
regionStates.updateRegionState(a, State.SPLITTING_NEW, sn);
|
||||
regionStates.updateRegionState(b, State.SPLITTING_NEW, sn);
|
||||
|
@ -3765,15 +3791,6 @@ public class AssignmentManager extends ZooKeeperListener {
|
|||
LOG.info("Failed to record split region " + p.getShortNameToLog());
|
||||
return "Failed to record the splitting in meta";
|
||||
}
|
||||
} else if (code == TransitionCode.SPLIT_REVERTED) {
|
||||
regionOnline(p, sn);
|
||||
regionOffline(a);
|
||||
regionOffline(b);
|
||||
|
||||
if (getTableStateManager().isTableState(p.getTable(),
|
||||
ZooKeeperProtos.Table.State.DISABLED, ZooKeeperProtos.Table.State.DISABLING)) {
|
||||
invokeUnAssign(p);
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
@ -4338,11 +4355,18 @@ public class AssignmentManager extends ZooKeeperListener {
|
|||
} catch (IOException exp) {
|
||||
errorMsg = StringUtils.stringifyException(exp);
|
||||
}
|
||||
break;
|
||||
case SPLIT_PONR:
|
||||
case SPLIT:
|
||||
errorMsg =
|
||||
onRegionSplit(serverName, code, hri, HRegionInfo.convert(transition.getRegionInfo(1)),
|
||||
HRegionInfo.convert(transition.getRegionInfo(2)));
|
||||
break;
|
||||
|
||||
case SPLIT_REVERTED:
|
||||
errorMsg =
|
||||
onRegionSplit(serverName, code, hri, HRegionInfo.convert(transition.getRegionInfo(1)),
|
||||
onRegionSplitReverted(serverName, hri,
|
||||
HRegionInfo.convert(transition.getRegionInfo(1)),
|
||||
HRegionInfo.convert(transition.getRegionInfo(2)));
|
||||
if (org.apache.commons.lang.StringUtils.isEmpty(errorMsg)) {
|
||||
try {
|
||||
|
|
|
@ -134,7 +134,7 @@ public class TableNamespaceManager {
|
|||
return nsTable;
|
||||
}
|
||||
|
||||
private synchronized boolean acquireSharedLock() throws IOException {
|
||||
private boolean acquireSharedLock() throws IOException {
|
||||
try {
|
||||
return rwLock.readLock().tryLock(sharedLockTimeoutMs, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
|
@ -142,11 +142,11 @@ public class TableNamespaceManager {
|
|||
}
|
||||
}
|
||||
|
||||
public synchronized void releaseSharedLock() {
|
||||
public void releaseSharedLock() {
|
||||
rwLock.readLock().unlock();
|
||||
}
|
||||
|
||||
public synchronized boolean acquireExclusiveLock() {
|
||||
public boolean acquireExclusiveLock() {
|
||||
try {
|
||||
return rwLock.writeLock().tryLock(exclusiveLockTimeoutMs, TimeUnit.MILLISECONDS);
|
||||
} catch (InterruptedException e) {
|
||||
|
@ -154,7 +154,7 @@ public class TableNamespaceManager {
|
|||
}
|
||||
}
|
||||
|
||||
public synchronized void releaseExclusiveLock() {
|
||||
public void releaseExclusiveLock() {
|
||||
rwLock.writeLock().unlock();
|
||||
}
|
||||
|
||||
|
|
|
@ -27,8 +27,11 @@ import org.apache.hadoop.hbase.ServerName;
|
|||
import org.apache.hadoop.hbase.TableName;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.master.MasterServices;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.apache.hadoop.hbase.util.Triple;
|
||||
|
||||
import java.util.ArrayList;
|
||||
import java.util.Collections;
|
||||
import java.util.Comparator;
|
||||
import java.util.List;
|
||||
|
||||
/**
|
||||
|
@ -45,6 +48,10 @@ import java.util.List;
|
|||
* to merge, if R1 + R1 < S, and normalization stops
|
||||
* <li> Otherwise, no action is performed
|
||||
* </ol>
|
||||
* <p>
|
||||
* Region sizes are coarse and approximate on the order of megabytes. Additionally,
|
||||
* "empty" regions (less than 1MB, with the previous note) are not merged away. This
|
||||
* is by design to prevent normalization from undoing the pre-splitting of a table.
|
||||
*/
|
||||
@InterfaceAudience.Private
|
||||
public class SimpleRegionNormalizer implements RegionNormalizer {
|
||||
|
@ -62,6 +69,22 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
|
|||
this.masterServices = masterServices;
|
||||
}
|
||||
|
||||
/*
|
||||
* This comparator compares the region size.
|
||||
* The second element in the triple is region size while the 3rd element
|
||||
* is the index of the region in the underlying List
|
||||
*/
|
||||
private Comparator<Triple<HRegionInfo, Long, Integer>> regionSizeComparator =
|
||||
new Comparator<Triple<HRegionInfo, Long, Integer>>() {
|
||||
@Override
|
||||
public int compare(Triple<HRegionInfo, Long, Integer> pair,
|
||||
Triple<HRegionInfo, Long, Integer> pair2) {
|
||||
long sz = pair.getSecond();
|
||||
long sz2 = pair2.getSecond();
|
||||
return (sz < sz2) ? -1 : ((sz == sz2) ? 0 : 1);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Computes next most "urgent" normalization action on the table.
|
||||
* Action may be either a split, or a merge, or no action.
|
||||
|
@ -72,7 +95,7 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
|
|||
@Override
|
||||
public NormalizationPlan computePlanForTable(TableName table) throws HBaseIOException {
|
||||
if (table == null || table.isSystemTable()) {
|
||||
LOG.debug("Normalization of table " + table + " isn't allowed");
|
||||
LOG.debug("Normalization of system table " + table + " isn't allowed");
|
||||
return EmptyNormalizationPlan.getInstance();
|
||||
}
|
||||
|
||||
|
@ -91,57 +114,18 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
|
|||
", number of regions: " + tableRegions.size());
|
||||
|
||||
long totalSizeMb = 0;
|
||||
Pair<HRegionInfo, Long> largestRegion = new Pair<>();
|
||||
|
||||
// A is a smallest region, B is it's smallest neighbor
|
||||
Pair<HRegionInfo, Long> smallestRegion = new Pair<>();
|
||||
int smallestRegionIndex = 0;
|
||||
|
||||
ArrayList<Triple<HRegionInfo, Long, Integer>> regionsWithSize =
|
||||
new ArrayList<Triple<HRegionInfo, Long, Integer>>(tableRegions.size());
|
||||
for (int i = 0; i < tableRegions.size(); i++) {
|
||||
HRegionInfo hri = tableRegions.get(i);
|
||||
long regionSize = getRegionSize(hri);
|
||||
regionsWithSize.add(new Triple<HRegionInfo, Long, Integer>(hri, regionSize, i));
|
||||
totalSizeMb += regionSize;
|
||||
|
||||
if (largestRegion.getFirst() == null || regionSize > largestRegion.getSecond()) {
|
||||
largestRegion.setFirst(hri);
|
||||
largestRegion.setSecond(regionSize);
|
||||
}
|
||||
|
||||
if (smallestRegion.getFirst() == null || regionSize < smallestRegion.getSecond()) {
|
||||
smallestRegion.setFirst(hri);
|
||||
smallestRegion.setSecond(regionSize);
|
||||
smallestRegionIndex = i;
|
||||
}
|
||||
}
|
||||
Collections.sort(regionsWithSize, regionSizeComparator);
|
||||
|
||||
// now get smallest neighbor of smallest region
|
||||
long leftNeighborSize = -1;
|
||||
|
||||
if (smallestRegionIndex > 0) {
|
||||
leftNeighborSize = getRegionSize(tableRegions.get(smallestRegionIndex - 1));
|
||||
}
|
||||
|
||||
long rightNeighborSize = -1;
|
||||
if (smallestRegionIndex < tableRegions.size() - 1) {
|
||||
rightNeighborSize = getRegionSize(tableRegions.get(smallestRegionIndex + 1));
|
||||
}
|
||||
|
||||
Pair<HRegionInfo, Long> smallestNeighborOfSmallestRegion;
|
||||
if (leftNeighborSize == -1) {
|
||||
smallestNeighborOfSmallestRegion =
|
||||
new Pair<>(tableRegions.get(smallestRegionIndex + 1), rightNeighborSize);
|
||||
} else if (rightNeighborSize == -1) {
|
||||
smallestNeighborOfSmallestRegion =
|
||||
new Pair<>(tableRegions.get(smallestRegionIndex - 1), leftNeighborSize);
|
||||
} else {
|
||||
if (leftNeighborSize < rightNeighborSize) {
|
||||
smallestNeighborOfSmallestRegion =
|
||||
new Pair<>(tableRegions.get(smallestRegionIndex - 1), leftNeighborSize);
|
||||
} else {
|
||||
smallestNeighborOfSmallestRegion =
|
||||
new Pair<>(tableRegions.get(smallestRegionIndex + 1), rightNeighborSize);
|
||||
}
|
||||
}
|
||||
Triple<HRegionInfo, Long, Integer> largestRegion = regionsWithSize.get(tableRegions.size()-1);
|
||||
|
||||
double avgRegionSize = totalSizeMb / (double) tableRegions.size();
|
||||
|
||||
|
@ -155,19 +139,31 @@ public class SimpleRegionNormalizer implements RegionNormalizer {
|
|||
+ largestRegion.getFirst().getRegionNameAsString() + " has size "
|
||||
+ largestRegion.getSecond() + ", more than 2 times the avg size, splitting");
|
||||
return new SplitNormalizationPlan(largestRegion.getFirst(), null);
|
||||
} else {
|
||||
if (smallestRegion.getSecond() + smallestNeighborOfSmallestRegion.getSecond()
|
||||
< avgRegionSize) {
|
||||
LOG.debug("Table " + table + ", smallest region size: " + smallestRegion.getSecond()
|
||||
+ " and its smallest neighbor size: " + smallestNeighborOfSmallestRegion.getSecond()
|
||||
+ ", less than the avg size, merging them");
|
||||
return new MergeNormalizationPlan(smallestRegion.getFirst(),
|
||||
smallestNeighborOfSmallestRegion.getFirst());
|
||||
} else {
|
||||
LOG.debug("No normalization needed, regions look good for table: " + table);
|
||||
return EmptyNormalizationPlan.getInstance();
|
||||
}
|
||||
}
|
||||
int candidateIdx = 0;
|
||||
// look for two successive entries whose indices are adjacent
|
||||
while (candidateIdx < tableRegions.size()-1) {
|
||||
if (Math.abs(regionsWithSize.get(candidateIdx).getThird() -
|
||||
regionsWithSize.get(candidateIdx + 1).getThird()) == 1) {
|
||||
break;
|
||||
}
|
||||
candidateIdx++;
|
||||
}
|
||||
if (candidateIdx == tableRegions.size()-1) {
|
||||
LOG.debug("No neighboring regions found for table: " + table);
|
||||
return EmptyNormalizationPlan.getInstance();
|
||||
}
|
||||
Triple<HRegionInfo, Long, Integer> candidateRegion = regionsWithSize.get(candidateIdx);
|
||||
Triple<HRegionInfo, Long, Integer> candidateRegion2 = regionsWithSize.get(candidateIdx+1);
|
||||
if (candidateRegion.getSecond() + candidateRegion2.getSecond() < avgRegionSize) {
|
||||
LOG.debug("Table " + table + ", smallest region size: " + candidateRegion.getSecond()
|
||||
+ " and its smallest neighbor size: " + candidateRegion2.getSecond()
|
||||
+ ", less than the avg size, merging them");
|
||||
return new MergeNormalizationPlan(candidateRegion.getFirst(),
|
||||
candidateRegion2.getFirst());
|
||||
}
|
||||
LOG.debug("No normalization needed, regions look good for table: " + table);
|
||||
return EmptyNormalizationPlan.getInstance();
|
||||
}
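A worked example of the merge-candidate scan above, with illustrative sizes (not from the patch):
// Illustrative numbers: regions R0..R4 of sizes 5, 5, 100, 100, 100 MB give an average of
// 62 MB and no region above twice that, so no split is planned. Sorted by size the list
// starts with (R0, 5, 0), (R1, 5, 1); their original indices are adjacent and 5 + 5 = 10
// is below the 62 MB average, so a MergeNormalizationPlan(R0, R1) would be returned.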
|
||||
|
||||
private long getRegionSize(HRegionInfo hri) {
|
||||
|
|
|
@ -24,10 +24,8 @@ import java.util.concurrent.CountDownLatch;
|
|||
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceStability;
|
||||
import org.apache.hadoop.hbase.ipc.RpcServer;
|
||||
import org.apache.hadoop.hbase.ipc.RpcCallContext;
|
||||
import org.apache.hadoop.hbase.client.VersionInfoUtil;
|
||||
import org.apache.hadoop.hbase.procedure2.Procedure;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.VersionInfo;
|
||||
|
||||
/**
|
||||
* Latch used by the Master to have the prepare() sync behaviour for old
|
||||
|
@ -44,24 +42,7 @@ public abstract class ProcedurePrepareLatch {
|
|||
}
|
||||
|
||||
public static boolean hasProcedureSupport() {
|
||||
return currentClientHasMinimumVersion(1, 1);
|
||||
}
|
||||
|
||||
private static boolean currentClientHasMinimumVersion(int major, int minor) {
|
||||
RpcCallContext call = RpcServer.getCurrentCall();
|
||||
VersionInfo versionInfo = call != null ? call.getClientVersionInfo() : null;
|
||||
if (versionInfo != null) {
|
||||
String[] components = versionInfo.getVersion().split("\\.");
|
||||
|
||||
int clientMajor = components.length > 0 ? Integer.parseInt(components[0]) : 0;
|
||||
if (clientMajor != major) {
|
||||
return clientMajor > major;
|
||||
}
|
||||
|
||||
int clientMinor = components.length > 1 ? Integer.parseInt(components[1]) : 0;
|
||||
return clientMinor >= minor;
|
||||
}
|
||||
return false;
|
||||
return VersionInfoUtil.currentClientHasMinimumVersion(1, 1);
|
||||
}
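The hand-rolled version parsing above now delegates to a shared helper; a hedged sketch of using the same helper at another call site:
// Gate a feature on the caller's client version; clients older than 1.1 still expect the
// synchronous prepare() behaviour described in the class comment.
if (VersionInfoUtil.currentClientHasMinimumVersion(1, 1)) {
  // use the procedure-based (latch-free) path
}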
|
||||
|
||||
protected abstract void countDown(final Procedure proc);
|
||||
|
|
|
@ -28,22 +28,23 @@ import java.util.Map;
|
|||
import java.util.NavigableMap;
|
||||
import java.util.UUID;
|
||||
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.fs.Path;
|
||||
import org.apache.hadoop.hbase.Cell;
|
||||
import org.apache.hadoop.hbase.CellScanner;
|
||||
import org.apache.hadoop.hbase.CellUtil;
|
||||
import org.apache.hadoop.hbase.HConstants;
|
||||
import org.apache.hadoop.hbase.classification.InterfaceAudience;
|
||||
import org.apache.hadoop.hbase.io.SizedCellScanner;
|
||||
import org.apache.hadoop.hbase.ipc.PayloadCarryingRpcController;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.AdminProtos.AdminService;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos;
|
||||
import org.apache.hadoop.hbase.protobuf.generated.WALProtos;
|
||||
import org.apache.hadoop.hbase.wal.WAL.Entry;
|
||||
import org.apache.hadoop.hbase.wal.WALKey;
|
||||
import org.apache.hadoop.hbase.regionserver.wal.WALEdit;
|
||||
import org.apache.hadoop.hbase.util.ByteStringer;
|
||||
import org.apache.hadoop.hbase.util.Pair;
|
||||
import org.apache.hadoop.hbase.wal.WAL.Entry;
|
||||
import org.apache.hadoop.hbase.wal.WALKey;
|
||||
|
||||
import com.google.protobuf.ServiceException;
|
||||
|
||||
|
@ -51,15 +52,20 @@ import com.google.protobuf.ServiceException;
|
|||
public class ReplicationProtbufUtil {
|
||||
/**
|
||||
* A helper to replicate a list of WAL entries using admin protocol.
|
||||
*
|
||||
* @param admin
|
||||
* @param entries
|
||||
* @param admin Admin service
|
||||
* @param entries Array of WAL entries to be replicated
|
||||
* @param replicationClusterId Id which will uniquely identify source cluster FS client
|
||||
* configurations in the replication configuration directory
|
||||
* @param sourceBaseNamespaceDir Path to source cluster base namespace directory
|
||||
* @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
|
||||
* @throws java.io.IOException
|
||||
*/
|
||||
public static void replicateWALEntry(final AdminService.BlockingInterface admin,
|
||||
final Entry[] entries) throws IOException {
|
||||
final Entry[] entries, String replicationClusterId, Path sourceBaseNamespaceDir,
|
||||
Path sourceHFileArchiveDir) throws IOException {
|
||||
Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner> p =
|
||||
buildReplicateWALEntryRequest(entries, null);
|
||||
buildReplicateWALEntryRequest(entries, null, replicationClusterId, sourceBaseNamespaceDir,
|
||||
sourceHFileArchiveDir);
|
||||
PayloadCarryingRpcController controller = new PayloadCarryingRpcController(p.getSecond());
|
||||
try {
|
||||
admin.replicateWALEntry(controller, p.getFirst());
|
||||
|
@ -78,19 +84,22 @@ public class ReplicationProtbufUtil {
|
|||
public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
|
||||
buildReplicateWALEntryRequest(final Entry[] entries) {
|
||||
// Accumulate all the Cells seen in here.
|
||||
return buildReplicateWALEntryRequest(entries, null);
|
||||
return buildReplicateWALEntryRequest(entries, null, null, null, null);
|
||||
}
|
||||
|
||||
/**
|
||||
* Create a new ReplicateWALEntryRequest from a list of HLog entries
|
||||
*
|
||||
* @param entries the HLog entries to be replicated
|
||||
* @param encodedRegionName alternative region name to use if not null
|
||||
* @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values
|
||||
* found.
|
||||
* @param replicationClusterId Id which will uniquely identify source cluster FS client
|
||||
* configurations in the replication configuration directory
|
||||
* @param sourceBaseNamespaceDir Path to source cluster base namespace directory
|
||||
* @param sourceHFileArchiveDir Path to the source cluster hfile archive directory
|
||||
* @return a pair of ReplicateWALEntryRequest and a CellScanner over all the WALEdit values found.
|
||||
*/
|
||||
public static Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>
|
||||
buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName) {
|
||||
buildReplicateWALEntryRequest(final Entry[] entries, byte[] encodedRegionName,
|
||||
String replicationClusterId, Path sourceBaseNamespaceDir, Path sourceHFileArchiveDir) {
|
||||
// Accumulate all the KVs seen in here.
|
||||
List<List<? extends Cell>> allCells = new ArrayList<List<? extends Cell>>(entries.length);
|
||||
int size = 0;
|
||||
|
@ -147,6 +156,17 @@ public class ReplicationProtbufUtil {
|
|||
entryBuilder.setAssociatedCellCount(cells.size());
|
||||
builder.addEntry(entryBuilder.build());
|
||||
}
|
||||
|
||||
if (replicationClusterId != null) {
|
||||
builder.setReplicationClusterId(replicationClusterId);
|
||||
}
|
||||
if (sourceBaseNamespaceDir != null) {
|
||||
builder.setSourceBaseNamespaceDirPath(sourceBaseNamespaceDir.toString());
|
||||
}
|
||||
if (sourceHFileArchiveDir != null) {
|
||||
builder.setSourceHFileArchiveDirPath(sourceHFileArchiveDir.toString());
|
||||
}
|
||||
|
||||
return new Pair<AdminProtos.ReplicateWALEntryRequest, CellScanner>(builder.build(),
|
||||
getCellScanner(allCells, size));
|
||||
}
@ -617,8 +617,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
|
|||
LOG.info("Changing the value of " + LARGE_COMPACTION_THREADS +
|
||||
" from " + this.longCompactions.getCorePoolSize() + " to " +
|
||||
largeThreads);
|
||||
this.longCompactions.setMaximumPoolSize(largeThreads);
|
||||
this.longCompactions.setCorePoolSize(largeThreads);
|
||||
if(this.longCompactions.getCorePoolSize() < largeThreads) {
|
||||
this.longCompactions.setMaximumPoolSize(largeThreads);
|
||||
this.longCompactions.setCorePoolSize(largeThreads);
|
||||
} else {
|
||||
this.longCompactions.setCorePoolSize(largeThreads);
|
||||
this.longCompactions.setMaximumPoolSize(largeThreads);
|
||||
}
|
||||
}
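// Presumably the branching above keeps corePoolSize <= maximumPoolSize at every intermediate
// step: raise the maximum first when growing, lower the core size first when shrinking,
// since ThreadPoolExecutor rejects a maximum smaller than the current core size. The same
// pattern repeats for the short-compaction, split and merge pools below.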
|
||||
|
||||
int smallThreads = newConf.getInt(SMALL_COMPACTION_THREADS,
|
||||
|
@ -627,8 +632,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
|
|||
LOG.info("Changing the value of " + SMALL_COMPACTION_THREADS +
|
||||
" from " + this.shortCompactions.getCorePoolSize() + " to " +
|
||||
smallThreads);
|
||||
this.shortCompactions.setMaximumPoolSize(smallThreads);
|
||||
this.shortCompactions.setCorePoolSize(smallThreads);
|
||||
if(this.shortCompactions.getCorePoolSize() < smallThreads) {
|
||||
this.shortCompactions.setMaximumPoolSize(smallThreads);
|
||||
this.shortCompactions.setCorePoolSize(smallThreads);
|
||||
} else {
|
||||
this.shortCompactions.setCorePoolSize(smallThreads);
|
||||
this.shortCompactions.setMaximumPoolSize(smallThreads);
|
||||
}
|
||||
}
|
||||
|
||||
int splitThreads = newConf.getInt(SPLIT_THREADS,
|
||||
|
@ -637,8 +647,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
|
|||
LOG.info("Changing the value of " + SPLIT_THREADS +
|
||||
" from " + this.splits.getCorePoolSize() + " to " +
|
||||
splitThreads);
|
||||
this.splits.setMaximumPoolSize(smallThreads);
|
||||
this.splits.setCorePoolSize(smallThreads);
|
||||
if(this.splits.getCorePoolSize() < splitThreads) {
|
||||
this.splits.setMaximumPoolSize(splitThreads);
|
||||
this.splits.setCorePoolSize(splitThreads);
|
||||
} else {
|
||||
this.splits.setCorePoolSize(splitThreads);
|
||||
this.splits.setMaximumPoolSize(splitThreads);
|
||||
}
|
||||
}
|
||||
|
||||
int mergeThreads = newConf.getInt(MERGE_THREADS,
|
||||
|
@ -647,8 +662,13 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
|
|||
LOG.info("Changing the value of " + MERGE_THREADS +
|
||||
" from " + this.mergePool.getCorePoolSize() + " to " +
|
||||
mergeThreads);
|
||||
this.mergePool.setMaximumPoolSize(smallThreads);
|
||||
this.mergePool.setCorePoolSize(smallThreads);
|
||||
if(this.mergePool.getCorePoolSize() < mergeThreads) {
|
||||
this.mergePool.setMaximumPoolSize(mergeThreads);
|
||||
this.mergePool.setCorePoolSize(mergeThreads);
|
||||
} else {
|
||||
this.mergePool.setCorePoolSize(mergeThreads);
|
||||
this.mergePool.setMaximumPoolSize(mergeThreads);
|
||||
}
|
||||
}
|
||||
|
||||
CompactionThroughputController old = this.compactionThroughputController;
|
||||
|
@ -667,10 +687,18 @@ public class CompactSplitThread implements CompactionRequestor, PropagatingConfi
|
|||
return this.shortCompactions.getCorePoolSize();
|
||||
}
|
||||
|
||||
public int getLargeCompactionThreadNum() {
|
||||
protected int getLargeCompactionThreadNum() {
|
||||
return this.longCompactions.getCorePoolSize();
|
||||
}
|
||||
|
||||
protected int getSplitThreadNum() {
|
||||
return this.splits.getCorePoolSize();
|
||||
}
|
||||
|
||||
protected int getMergeThreadNum() {
|
||||
return this.mergePool.getCorePoolSize();
|
||||
}
|
||||
|
||||
/**
|
||||
* {@inheritDoc}
|
||||
*/
|
||||
|
|
|
@ -4132,11 +4132,11 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
|||
continue;
|
||||
}
|
||||
}
|
||||
boolean checkRowWithinBoundary = false;
|
||||
// Check this edit is for this region.
|
||||
if (!Bytes.equals(key.getEncodedRegionName(),
|
||||
this.getRegionInfo().getEncodedNameAsBytes())) {
|
||||
skippedEdits++;
|
||||
continue;
|
||||
checkRowWithinBoundary = true;
|
||||
}
|
||||
|
||||
boolean flush = false;
|
||||
|
@ -4144,11 +4144,14 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
|||
// Check this edit is for me. Also, guard against writing the special
|
||||
// METACOLUMN info such as HBASE::CACHEFLUSH entries
|
||||
if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) {
|
||||
//this is a special edit, we should handle it
|
||||
CompactionDescriptor compaction = WALEdit.getCompaction(cell);
|
||||
if (compaction != null) {
|
||||
//replay the compaction
|
||||
replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE);
|
||||
// if region names don't match, skip replaying the compaction marker
|
||||
if (!checkRowWithinBoundary) {
|
||||
//this is a special edit, we should handle it
|
||||
CompactionDescriptor compaction = WALEdit.getCompaction(cell);
|
||||
if (compaction != null) {
|
||||
//replay the compaction
|
||||
replayWALCompactionMarker(compaction, false, true, Long.MAX_VALUE);
|
||||
}
|
||||
}
|
||||
skippedEdits++;
|
||||
continue;
|
||||
|
@ -4164,6 +4167,12 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
|||
skippedEdits++;
|
||||
continue;
|
||||
}
|
||||
if (checkRowWithinBoundary && !rowIsInRange(this.getRegionInfo(),
|
||||
cell.getRowArray(), cell.getRowOffset(), cell.getRowLength())) {
|
||||
LOG.warn("Row of " + cell + " is not within region boundary");
|
||||
skippedEdits++;
|
||||
continue;
|
||||
}
|
||||
// Now, figure if we should skip this edit.
|
||||
if (key.getLogSeqNum() <= maxSeqIdInStores.get(store.getFamily()
|
||||
.getName())) {
|
||||
|
@ -4234,8 +4243,16 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
|||
void replayWALCompactionMarker(CompactionDescriptor compaction, boolean pickCompactionFiles,
|
||||
boolean removeFiles, long replaySeqId)
|
||||
throws IOException {
|
||||
checkTargetRegion(compaction.getEncodedRegionName().toByteArray(),
|
||||
"Compaction marker from WAL ", compaction);
|
||||
try {
|
||||
checkTargetRegion(compaction.getEncodedRegionName().toByteArray(),
|
||||
"Compaction marker from WAL ", compaction);
|
||||
} catch (WrongRegionException wre) {
|
||||
if (RegionReplicaUtil.isDefaultReplica(this.getRegionInfo())) {
|
||||
// skip the compaction marker since it is not for this region
|
||||
return;
|
||||
}
|
||||
throw wre;
|
||||
}
|
||||
|
||||
synchronized (writestate) {
|
||||
if (replaySeqId < lastReplayedOpenRegionSeqId) {
|
||||
|
@ -6663,6 +6680,15 @@ public class HRegion implements HeapSize, PropagatingConfigurationObserver, Regi
|
|||
(Bytes.compareTo(info.getEndKey(), row) > 0));
|
||||
}
|
||||
|
||||
public static boolean rowIsInRange(HRegionInfo info, final byte [] row, final int offset,
|
||||
final short length) {
|
||||
return ((info.getStartKey().length == 0) ||
|
||||
(Bytes.compareTo(info.getStartKey(), 0, info.getStartKey().length,
|
||||
row, offset, length) <= 0)) &&
|
||||
((info.getEndKey().length == 0) ||
|
||||
(Bytes.compareTo(info.getEndKey(), 0, info.getEndKey().length, row, offset, length) > 0));
|
||||
}
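A sketch of the intended use of the new offset/length overload ("region" and "cell" are assumed variables): test a Cell's row against the region boundaries without copying the row into its own array.
boolean inRange = HRegion.rowIsInRange(region.getRegionInfo(),
    cell.getRowArray(), cell.getRowOffset(), cell.getRowLength());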
|
||||
|
||||
/**
|
||||
* Merge two HRegions. The regions must be adjacent and must not overlap.
|
||||
*
|
||||
|
|
|
@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.HConstants;
|
|||
import org.apache.hadoop.hbase.HRegionInfo;
|
||||
import org.apache.hadoop.hbase.HTableDescriptor;
|
||||
import org.apache.hadoop.hbase.MetaTableAccessor;
|
||||
import org.apache.hadoop.hbase.MultiActionResultTooLarge;
|
||||
import org.apache.hadoop.hbase.NotServingRegionException;
|
||||
import org.apache.hadoop.hbase.ServerName;
|
||||
import org.apache.hadoop.hbase.TableName;
|
||||
|
@ -67,6 +68,7 @@ import org.apache.hadoop.hbase.client.RegionReplicaUtil;
|
|||
import org.apache.hadoop.hbase.client.Result;
|
||||
import org.apache.hadoop.hbase.client.RowMutations;
|
||||
import org.apache.hadoop.hbase.client.Scan;
|
||||
import org.apache.hadoop.hbase.client.VersionInfoUtil;
|
||||
import org.apache.hadoop.hbase.conf.ConfigurationObserver;
|
||||
import org.apache.hadoop.hbase.coordination.CloseRegionCoordination;
|
||||
import org.apache.hadoop.hbase.coordination.OpenRegionCoordination;
|
||||
|
@ -366,8 +368,11 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
* @return True if current call supports cellblocks
|
||||
*/
|
||||
private boolean isClientCellBlockSupport() {
|
||||
RpcCallContext context = RpcServer.getCurrentCall();
|
||||
return context != null && context.isClientCellBlockSupport();
|
||||
return isClientCellBlockSupport(RpcServer.getCurrentCall());
|
||||
}
|
||||
|
||||
private boolean isClientCellBlockSupport(RpcCallContext context) {
|
||||
return context != null && context.isClientCellBlockSupported();
|
||||
}
|
||||
|
||||
private void addResult(final MutateResponse.Builder builder,
|
||||
|
@ -426,13 +431,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
rm = new RowMutations(action.getMutation().getRow().toByteArray());
|
||||
}
|
||||
switch (type) {
|
||||
case PUT:
|
||||
rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
|
||||
break;
|
||||
case DELETE:
|
||||
rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
|
||||
break;
|
||||
default:
|
||||
case PUT:
|
||||
rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
|
||||
break;
|
||||
case DELETE:
|
||||
rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
|
||||
break;
|
||||
default:
|
||||
throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
|
||||
}
|
||||
}
|
||||
|
@ -469,14 +474,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
rm = new RowMutations(action.getMutation().getRow().toByteArray());
|
||||
}
|
||||
switch (type) {
|
||||
case PUT:
|
||||
rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
|
||||
break;
|
||||
case DELETE:
|
||||
rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
|
||||
break;
|
||||
default:
|
||||
throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
|
||||
case PUT:
|
||||
rm.add(ProtobufUtil.toPut(action.getMutation(), cellScanner));
|
||||
break;
|
||||
case DELETE:
|
||||
rm.add(ProtobufUtil.toDelete(action.getMutation(), cellScanner));
|
||||
break;
|
||||
default:
|
||||
throw new DoNotRetryIOException("Atomic put and/or delete only, not " + type.name());
|
||||
}
|
||||
}
|
||||
return region.checkAndRowMutate(row, family, qualifier, compareOp, comparator, rm, Boolean.TRUE);
|
||||
|
@ -577,10 +582,46 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
// ResultOrException instance that matches each Put or Delete is then added down in the
|
||||
// doBatchOp call. We should be staying aligned though the Put and Delete are deferred/batched
|
||||
List<ClientProtos.Action> mutations = null;
|
||||
for (ClientProtos.Action action: actions.getActionList()) {
|
||||
long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
|
||||
RpcCallContext context = RpcServer.getCurrentCall();
|
||||
IOException sizeIOE = null;
|
||||
Object lastBlock = null;
|
||||
for (ClientProtos.Action action : actions.getActionList()) {
|
||||
ClientProtos.ResultOrException.Builder resultOrExceptionBuilder = null;
|
||||
try {
|
||||
Result r = null;
|
||||
|
||||
if (context != null
|
||||
&& context.isRetryImmediatelySupported()
|
||||
&& (context.getResponseCellSize() > maxQuotaResultSize
|
||||
|| context.getResponseBlockSize() > maxQuotaResultSize)) {
|
||||
|
||||
// We're storing the exception since the exception and reason string won't
|
||||
// change after the response size limit is reached.
|
||||
if (sizeIOE == null ) {
|
||||
// We don't need the stack unwinding, so don't throw the exception.
|
||||
// Throwing will kill the JVM's JIT.
|
||||
//
|
||||
// Instead just create the exception and then store it.
|
||||
sizeIOE = new MultiActionResultTooLarge("Max size exceeded"
|
||||
+ " CellSize: " + context.getResponseCellSize()
|
||||
+ " BlockSize: " + context.getResponseBlockSize());
|
||||
|
||||
// Only report the exception once since there's only one request that
|
||||
// caused the exception. Otherwise this number will dominate the exceptions count.
|
||||
rpcServer.getMetrics().exception(sizeIOE);
|
||||
}
|
||||
|
||||
// Now that the exception is known to have been created,
|
||||
// use it for the response.
|
||||
//
|
||||
// This will create a copy in the builder.
|
||||
resultOrExceptionBuilder = ResultOrException.newBuilder().
|
||||
setException(ResponseConverter.buildException(sizeIOE));
|
||||
resultOrExceptionBuilder.setIndex(action.getIndex());
|
||||
builder.addResultOrException(resultOrExceptionBuilder.build());
|
||||
continue;
|
||||
}
|
||||
if (action.hasGet()) {
|
||||
Get get = ProtobufUtil.toGet(action.getGet());
|
||||
r = region.get(get);
|
||||
|
@ -633,11 +674,14 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
if (isClientCellBlockSupport()) {
|
||||
pbResult = ProtobufUtil.toResultNoData(r);
|
||||
// Hard to guess the size here. Just make a rough guess.
|
||||
if (cellsToReturn == null) cellsToReturn = new ArrayList<CellScannable>();
|
||||
if (cellsToReturn == null) {
|
||||
cellsToReturn = new ArrayList<CellScannable>();
|
||||
}
|
||||
cellsToReturn.add(r);
|
||||
} else {
|
||||
pbResult = ProtobufUtil.toResult(r);
|
||||
}
|
||||
lastBlock = addSize(context, r, lastBlock);
|
||||
resultOrExceptionBuilder =
|
||||
ClientProtos.ResultOrException.newBuilder().setResult(pbResult);
|
||||
}
|
||||
|
@ -719,8 +763,8 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
|
||||
case SUCCESS:
|
||||
builder.addResultOrException(getResultOrException(
|
||||
ClientProtos.Result.getDefaultInstance(), index,
|
||||
((HRegion)region).getRegionStats()));
|
||||
ClientProtos.Result.getDefaultInstance(), index,
|
||||
((HRegion) region).getRegionStats()));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -869,13 +913,13 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
ConnectionUtils.setServerSideHConnectionRetriesConfig(rs.conf, name, LOG);
|
||||
try {
|
||||
rpcServer = new RpcServer(rs, name, getServices(),
|
||||
bindAddress, // use final bindAddress for this server.
|
||||
rs.conf,
|
||||
rpcSchedulerFactory.create(rs.conf, this, rs));
|
||||
} catch(BindException be) {
|
||||
bindAddress, // use final bindAddress for this server.
|
||||
rs.conf,
|
||||
rpcSchedulerFactory.create(rs.conf, this, rs));
|
||||
} catch (BindException be) {
|
||||
String configName = (this instanceof MasterRpcServices) ? HConstants.MASTER_PORT :
|
||||
HConstants.REGIONSERVER_PORT;
|
||||
throw new IOException(be.getMessage() + ". To switch ports use the '" + configName +
|
||||
HConstants.REGIONSERVER_PORT;
|
||||
throw new IOException(be.getMessage() + ". To switch ports use the '" + configName +
|
||||
"' configuration property.", be.getCause() != null ? be.getCause() : be);
|
||||
}
|
||||
|
||||
|
@ -963,6 +1007,31 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
return scannerId;
|
||||
}
|
||||
|
||||
/**
|
||||
* Method to account for the size of retained cells and retained data blocks.
|
||||
* @return an object that represents the last referenced block from this response.
|
||||
*/
|
||||
Object addSize(RpcCallContext context, Result r, Object lastBlock) {
|
||||
if (context != null && !r.isEmpty()) {
|
||||
for (Cell c : r.rawCells()) {
|
||||
context.incrementResponseCellSize(CellUtil.estimatedHeapSizeOf(c));
|
||||
// We're using the last block being the same as the current block as
|
||||
// a proxy for pointing to a new block. This won't be exact.
|
||||
// If there are multiple gets that bounce back and forth
// then it's possible that this will over-count the size of
// referenced blocks. However it's better to over-count and
// use two RPCs than to OOME the RegionServer.
|
||||
byte[] valueArray = c.getValueArray();
|
||||
if (valueArray != lastBlock) {
|
||||
context.incrementResponseBlockSize(valueArray.length);
|
||||
lastBlock = valueArray;
|
||||
}
|
||||
}
|
||||
}
|
||||
return lastBlock;
|
||||
}
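The accumulation pattern used by the multi and scan paths in this patch ("results" and "context" assumed to be in scope): thread lastBlock through successive calls so that cells backed by the same underlying block are only counted once.
Object lastBlock = null;
for (Result r : results) {
  lastBlock = addSize(context, r, lastBlock);
}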
|
||||
|
||||
|
||||
/**
|
||||
* Find the HRegion based on a region specifier
|
||||
*
|
||||
|
@ -1740,7 +1809,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
List<WALEntry> entries = request.getEntryList();
|
||||
CellScanner cellScanner = ((PayloadCarryingRpcController)controller).cellScanner();
|
||||
regionServer.getRegionServerCoprocessorHost().preReplicateLogEntries(entries, cellScanner);
|
||||
regionServer.replicationSinkHandler.replicateLogEntries(entries, cellScanner);
|
||||
regionServer.replicationSinkHandler.replicateLogEntries(entries, cellScanner,
|
||||
request.getReplicationClusterId(), request.getSourceBaseNamespaceDirPath(),
|
||||
request.getSourceHFileArchiveDirPath());
|
||||
regionServer.getRegionServerCoprocessorHost().postReplicateLogEntries(entries, cellScanner);
|
||||
return ReplicateWALEntryResponse.newBuilder().build();
|
||||
} else {
|
||||
|
@ -1964,7 +2035,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
ProtobufUtil.toResult(existence, region.getRegionInfo().getReplicaId() != 0);
|
||||
builder.setResult(pbr);
|
||||
} else if (r != null) {
|
||||
ClientProtos.Result pbr = ProtobufUtil.toResult(r);
|
||||
ClientProtos.Result pbr;
|
||||
RpcCallContext call = RpcServer.getCurrentCall();
|
||||
if (isClientCellBlockSupport(call) && controller instanceof PayloadCarryingRpcController
|
||||
&& VersionInfoUtil.hasMinimumVersion(call.getClientVersionInfo(), 1, 3)) {
|
||||
pbr = ProtobufUtil.toResultNoData(r);
|
||||
((PayloadCarryingRpcController) controller)
|
||||
.setCellScanner(CellUtil.createCellScanner(r.rawCells()));
|
||||
} else {
|
||||
pbr = ProtobufUtil.toResult(r);
|
||||
}
|
||||
builder.setResult(pbr);
|
||||
}
|
||||
if (r != null) {
|
||||
|
@ -2004,7 +2084,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
// It is also the conduit via which we pass back data.
|
||||
PayloadCarryingRpcController controller = (PayloadCarryingRpcController)rpcc;
|
||||
CellScanner cellScanner = controller != null ? controller.cellScanner(): null;
|
||||
if (controller != null) controller.setCellScanner(null);
|
||||
if (controller != null) {
|
||||
controller.setCellScanner(null);
|
||||
}
|
||||
|
||||
long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
|
||||
|
||||
|
@ -2070,7 +2152,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
if (cellsToReturn != null && !cellsToReturn.isEmpty() && controller != null) {
|
||||
controller.setCellScanner(CellUtil.createCellScanner(cellsToReturn));
|
||||
}
|
||||
if (processed != null) responseBuilder.setProcessed(processed);
|
||||
if (processed != null) {
|
||||
responseBuilder.setProcessed(processed);
|
||||
}
|
||||
return responseBuilder.build();
|
||||
}
|
||||
|
||||
|
@ -2087,10 +2171,12 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
// rpc controller is how we bring in data via the back door; it is unprotobuf'ed data.
|
||||
// It is also the conduit via which we pass back data.
|
||||
PayloadCarryingRpcController controller = (PayloadCarryingRpcController)rpcc;
|
||||
CellScanner cellScanner = controller != null? controller.cellScanner(): null;
|
||||
CellScanner cellScanner = controller != null ? controller.cellScanner() : null;
|
||||
OperationQuota quota = null;
|
||||
// Clear scanner so we are not holding on to reference across call.
|
||||
if (controller != null) controller.setCellScanner(null);
|
||||
if (controller != null) {
|
||||
controller.setCellScanner(null);
|
||||
}
|
||||
try {
|
||||
checkOpen();
|
||||
requestCount.increment();
|
||||
|
@ -2243,6 +2329,9 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
boolean moreResults = true;
|
||||
boolean closeScanner = false;
|
||||
boolean isSmallScan = false;
|
||||
RpcCallContext context = RpcServer.getCurrentCall();
|
||||
Object lastBlock = null;
|
||||
|
||||
ScanResponse.Builder builder = ScanResponse.newBuilder();
|
||||
if (request.hasCloseScanner()) {
|
||||
closeScanner = request.getCloseScanner();
|
||||
|
@ -2296,6 +2385,16 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
scannerName = String.valueOf(scannerId);
|
||||
ttl = this.scannerLeaseTimeoutPeriod;
|
||||
}
|
||||
if (request.hasRenew() && request.getRenew()) {
|
||||
rsh = scanners.get(scannerName);
|
||||
lease = regionServer.leases.removeLease(scannerName);
|
||||
if (lease != null && rsh != null) {
|
||||
regionServer.leases.addLease(lease);
|
||||
// Increment the nextCallSeq value which is the next expected from client.
|
||||
rsh.incNextCallSeq();
|
||||
}
|
||||
return builder.build();
|
||||
}
|
||||
|
||||
quota = getQuotaManager().checkQuota(region, OperationQuota.OperationType.SCAN);
|
||||
long maxQuotaResultSize = Math.min(maxScannerResultSize, quota.getReadAvailable());
|
||||
|
@ -2323,8 +2422,6 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
// where processing of request takes > lease expiration time.
|
||||
lease = regionServer.leases.removeLease(scannerName);
|
||||
List<Result> results = new ArrayList<Result>();
|
||||
long totalCellSize = 0;
|
||||
long currentScanResultSize = 0;
|
||||
|
||||
boolean done = false;
|
||||
// Call coprocessor. Get region info from scanner.
|
||||
|
@ -2333,10 +2430,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
scanner, results, rows);
|
||||
if (!results.isEmpty()) {
|
||||
for (Result r : results) {
|
||||
for (Cell cell : r.rawCells()) {
|
||||
totalCellSize += CellUtil.estimatedSerializedSizeOf(cell);
|
||||
currentScanResultSize += CellUtil.estimatedHeapSizeOfWithoutTags(cell);
|
||||
}
|
||||
lastBlock = addSize(context, r, lastBlock);
|
||||
}
|
||||
}
|
||||
if (bypass != null && bypass.booleanValue()) {
|
||||
|
@ -2368,7 +2462,7 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
// If the coprocessor host is adding to the result list, we cannot guarantee the
|
||||
// correct ordering of partial results and so we prevent partial results from being
|
||||
// formed.
|
||||
boolean serverGuaranteesOrderOfPartials = currentScanResultSize == 0;
|
||||
boolean serverGuaranteesOrderOfPartials = results.isEmpty();
|
||||
boolean allowPartialResults =
|
||||
clientHandlesPartials && serverGuaranteesOrderOfPartials && !isSmallScan;
|
||||
boolean moreRows = false;
|
||||
|
@ -2434,11 +2528,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
moreRows = scanner.nextRaw(values, scannerContext);
|
||||
|
||||
if (!values.isEmpty()) {
|
||||
for (Cell cell : values) {
|
||||
totalCellSize += CellUtil.estimatedSerializedSizeOf(cell);
|
||||
}
|
||||
final boolean partial = scannerContext.partialResultFormed();
|
||||
results.add(Result.create(values, null, stale, partial));
|
||||
Result r = Result.create(values, null, stale, partial);
|
||||
lastBlock = addSize(context, r, lastBlock);
|
||||
results.add(r);
|
||||
i++;
|
||||
}
|
||||
|
||||
|
@ -2490,9 +2583,10 @@ public class RSRpcServices implements HBaseRPCErrorHandler,
|
|||
}
|
||||
}
|
||||
region.updateReadRequestsCount(i);
|
||||
region.getMetrics().updateScanNext(totalCellSize);
|
||||
long responseCellSize = context != null ? context.getResponseCellSize() : 0;
|
||||
region.getMetrics().updateScanNext(responseCellSize);
|
||||
if (regionServer.metricsRegionServer != null) {
|
||||
regionServer.metricsRegionServer.updateScannerNext(totalCellSize);
|
||||
regionServer.metricsRegionServer.updateScannerNext(responseCellSize);
|
||||
}
|
||||
} finally {
|
||||
region.closeRegionOperation();
|
||||
|
|
|
@@ -36,7 +36,13 @@ public interface ReplicationSinkService extends ReplicationService {
   * Carry on the list of log entries down to the sink
   * @param entries list of WALEntries to replicate
   * @param cells Cells that the WALEntries refer to (if cells is non-null)
   * @param replicationClusterId Id which will uniquely identify source cluster FS client
   *          configurations in the replication configuration directory
   * @param sourceBaseNamespaceDirPath Path that points to the source cluster base namespace
   *          directory required for replicating hfiles
   * @param sourceHFileArchiveDirPath Path that points to the source cluster hfile archive directory
   * @throws IOException
   */
  void replicateLogEntries(List<WALEntry> entries, CellScanner cells) throws IOException;
  void replicateLogEntries(List<WALEntry> entries, CellScanner cells, String replicationClusterId,
      String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException;
}

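The widened signature carries everything a sink needs to locate bulk-loaded HFiles on the source cluster. A simplified model of the contract is sketched below with generic placeholders instead of WALEntry and CellScanner, so it is not the real interface, only its shape.

import java.io.IOException;
import java.util.List;

// Simplified stand-in for the extended replication sink contract shown above.
interface ReplicationSinkModel<E, C> {
  void replicateLogEntries(List<E> entries, C cells, String replicationClusterId,
      String sourceBaseNamespaceDirPath, String sourceHFileArchiveDirPath) throws IOException;
}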
@@ -80,10 +80,8 @@ public class RegionReplicaFlushHandler extends EventHandler {

  @Override
  protected void handleException(Throwable t) {
    super.handleException(t);

    if (t instanceof InterruptedIOException || t instanceof InterruptedException) {
      // ignore
      LOG.error("Caught throwable while processing event " + eventType, t);
    } else if (t instanceof RuntimeException) {
      server.abort("ServerAborting because a runtime exception was thrown", t);
    } else {

@@ -23,6 +23,8 @@ import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InterruptedIOException;
import java.io.OutputStream;
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryUsage;
import java.lang.reflect.InvocationTargetException;
import java.net.URLEncoder;
import java.util.ArrayList;

@@ -62,6 +64,7 @@ import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.hadoop.hbase.classification.InterfaceAudience;
import org.apache.hadoop.hbase.io.util.HeapMemorySizeUtil;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.hbase.util.ClassSize;
import org.apache.hadoop.hbase.util.DrainBarrier;
@@ -505,8 +508,16 @@ public class FSHLog implements WAL {
        FSUtils.getDefaultBlockSize(this.fs, this.fullPathLogDir));
    this.logrollsize =
        (long)(blocksize * conf.getFloat("hbase.regionserver.logroll.multiplier", 0.95f));

    this.maxLogs = conf.getInt("hbase.regionserver.maxlogs", 32);

    float memstoreRatio = conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY,
        conf.getFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_OLD_KEY,
            HeapMemorySizeUtil.DEFAULT_MEMSTORE_SIZE));
    boolean maxLogsDefined = conf.get("hbase.regionserver.maxlogs") != null;
    if (maxLogsDefined) {
      LOG.warn("'hbase.regionserver.maxlogs' was deprecated.");
    }
    this.maxLogs = conf.getInt("hbase.regionserver.maxlogs",
        Math.max(32, calculateMaxLogFiles(memstoreRatio, logrollsize)));
    this.minTolerableReplication = conf.getInt("hbase.regionserver.hlog.tolerable.lowreplication",
        FSUtils.getDefaultReplication(fs, this.fullPathLogDir));
    this.lowReplicationRollLimit =
@@ -556,6 +567,12 @@ public class FSHLog implements WAL {
    this.disruptor.start();
  }

  private int calculateMaxLogFiles(float memstoreSizeRatio, long logRollSize) {
    MemoryUsage mu = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage();
    int maxLogs = Math.round(mu.getMax() * memstoreSizeRatio * 2 / logRollSize);
    return maxLogs;
  }

  /**
   * Get the backing files associated with this WAL.
   * @return may be null if there are no files.
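The new default ties the WAL file count to heap size: roughly round(heapMax * memstoreSizeRatio * 2 / logRollSize), floored at 32 by the Math.max above. A small worked example follows; the 0.4 memstore ratio and 128 MB block size are illustrative assumptions, and the heap sizes are hard-coded here instead of being read from the MemoryMXBean as the patch does.

// Worked example of the heap-based maxlogs default.
public final class MaxLogsExample {
  static int calculateMaxLogFiles(long heapMaxBytes, float memstoreSizeRatio, long logRollSize) {
    return Math.round(heapMaxBytes * memstoreSizeRatio * 2 / logRollSize);
  }

  public static void main(String[] args) {
    long logRollSize = (long) (128L * 1024 * 1024 * 0.95f); // 128 MB block * 0.95 roll multiplier
    long fourGb = 4L * 1024 * 1024 * 1024;
    long thirtyTwoGb = 32L * 1024 * 1024 * 1024;
    // ~27 for a 4 GB heap, so the floor of 32 wins; ~216 for a 32 GB heap.
    System.out.println(Math.max(32, calculateMaxLogFiles(fourGb, 0.4f, logRollSize)));
    System.out.println(Math.max(32, calculateMaxLogFiles(thirtyTwoGb, 0.4f, logRollSize)));
  }
}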
@@ -85,17 +85,16 @@ public interface WALActionsListener {
  );

  /**
   *
   * @param htd
   * @param logKey
   * @param logEdit
   * TODO: Retire this in favor of {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)}
   * It only exists to get scope when replicating. Scope should be in the WALKey and not need
   * us passing in a <code>htd</code>.
   * @param logEdit TODO: Retire this in favor of
   *          {@link #visitLogEntryBeforeWrite(HRegionInfo, WALKey, WALEdit)} It only exists to get
   *          scope when replicating. Scope should be in the WALKey and not need us passing in a
   *          <code>htd</code>.
   * @throws IOException If failed to parse the WALEdit
   */
  void visitLogEntryBeforeWrite(
      HTableDescriptor htd, WALKey logKey, WALEdit logEdit
  );
  void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit)
      throws IOException;

  /**
   * For notification post append to the writer. Used by metrics system at least.
@@ -136,7 +135,9 @@ public interface WALActionsListener {
  public void visitLogEntryBeforeWrite(HRegionInfo info, WALKey logKey, WALEdit logEdit) {}

  @Override
  public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit) {}
  public void visitLogEntryBeforeWrite(HTableDescriptor htd, WALKey logKey, WALEdit logEdit)
      throws IOException {
  }

  @Override
  public void postAppend(final long entryLen, final long elapsedTimeMillis) {}

@@ -99,7 +99,7 @@ public class WALEdit implements Writable, HeapSize {
  private final int VERSION_2 = -1;
  private final boolean isReplay;

  private final ArrayList<Cell> cells = new ArrayList<Cell>(1);
  private ArrayList<Cell> cells = new ArrayList<Cell>(1);

  public static final WALEdit EMPTY_WALEDIT = new WALEdit();

@@ -170,6 +170,18 @@ public class WALEdit implements Writable, HeapSize {
    return cells;
  }

  /**
   * This is not thread safe.
   * This will change the WALEdit and shouldn't be used unless you are sure that nothing
   * else depends on the contents being immutable.
   *
   * @param cells the list of cells that this WALEdit now contains.
   */
  @InterfaceAudience.Private
  public void setCells(ArrayList<Cell> cells) {
    this.cells = cells;
  }

  public NavigableMap<byte[], Integer> getAndRemoveScopes() {
    NavigableMap<byte[], Integer> result = scopes;
    scopes = null;
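Because setCells swaps the backing list in place and, as the javadoc warns, is not thread safe, callers that filter an edit are expected to build a fresh list rather than mutate the one returned by getCells() while others may still be reading it. A hedged usage sketch with a simplified stand-in class (the Cell type is parameterized away; this is not the real WALEdit):

import java.util.ArrayList;
import java.util.function.Predicate;

// Simplified stand-in illustrating the mutable-cells pattern described above.
final class WalEditModel<C> {
  private ArrayList<C> cells = new ArrayList<>();

  ArrayList<C> getCells() { return cells; }

  // Not thread safe: replaces the edit's contents, mirroring the setCells idea.
  void setCells(ArrayList<C> cells) { this.cells = cells; }

  // Example caller: keep only the cells that pass the filter, building a new list
  // instead of editing the existing one under concurrent readers.
  void retainMatching(Predicate<C> filter) {
    ArrayList<C> kept = new ArrayList<>(cells.size());
    for (C cell : cells) {
      if (filter.test(cell)) {
        kept.add(cell);
      }
    }
    setCells(kept);
  }
}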
Some files were not shown because too many files have changed in this diff.